https://gitweb.dealii.org/ - dealii.git/commitdiff
Make Kokkos a required dependency and bundle Kokkos 3.7.00
author    Daniel Arndt <arndtd@ornl.gov>
          Tue, 22 Nov 2022 14:57:38 +0000 (09:57 -0500)
committer Daniel Arndt <arndtd@ornl.gov>
          Tue, 22 Nov 2022 14:57:38 +0000 (09:57 -0500)
536 files changed:
bundled/CMakeLists.txt
bundled/README.md
bundled/kokkos-3.7.00/CMakeLists.txt [new file with mode: 0644]
bundled/kokkos-3.7.00/Copyright.txt [new file with mode: 0644]
bundled/kokkos-3.7.00/LICENSE [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/KokkosAlgorithms_dummy.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/Kokkos_Random.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/Kokkos_Sort.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/Kokkos_StdAlgorithms.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_AdjacentDifference.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_AdjacentFind.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_AllOf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_AnyOf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_BeginEnd.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Copy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_CopyBackward.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_CopyIf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_CopyN.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Count.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_CountIf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Distance.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Equal.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ExclusiveScan.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Fill.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FillN.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Find.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindEnd.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindFirstOf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindIf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindIfNot.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ForEach.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ForEachN.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Generate.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_GenerateN.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_InclusiveScan.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IsPartitioned.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IsSorted.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IsSortedUntil.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IterSwap.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_LexicographicalCompare.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_MaxElement.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_MinElement.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_MinMaxElement.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Mismatch.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Move.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_MoveBackward.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_NoneOf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_PartitionCopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_PartitionPoint.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Reduce.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Remove.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_RemoveCopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_RemoveCopyIf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_RemoveIf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Replace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ReplaceCopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ReplaceCopyIf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ReplaceIf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Reverse.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ReverseCopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Rotate.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_RotateCopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Search.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_SearchN.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ShiftLeft.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ShiftRight.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Swap.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_SwapRanges.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Transform.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_TransformExclusiveScan.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_TransformInclusiveScan.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_TransformReduce.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Unique.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_UniqueCopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_AdjacentDifference.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_AdjacentFind.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_AllOfAnyOfNoneOf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Constraints.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_CopyBackward.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_CopyCopyN.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_CopyIf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_CountCountIf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Equal.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ExclusiveScan.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_FillFillN.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_FindEnd.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_FindFirstOf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_FindIfOrNot.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ForEachForEachN.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_GenerateGenerateN.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_HelperPredicates.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_IdentityReferenceUnaryFunctor.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_InclusiveScan.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_IsPartitioned.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_IsSorted.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_IsSortedUntil.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_LexicographicalCompare.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_MinMaxMinmaxElement.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Mismatch.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Move.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_MoveBackward.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_PartitionCopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_PartitionPoint.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_RandomAccessIterator.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Reduce.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReducerWithArbitraryJoinerNoNeutralElement.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_RemoveAllVariants.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Replace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReplaceCopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReplaceCopyIf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReplaceIf.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Reverse.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReverseCopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Rotate.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_RotateCopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Search.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_SearchN.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ShiftLeft.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ShiftRight.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_SwapRanges.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Transform.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_TransformExclusiveScan.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_TransformInclusiveScan.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_TransformReduce.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Unique.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_UniqueCopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ValueWrapperForNoNeutralElement.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/Kokkos_Bitset.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/Kokkos_DualView.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/Kokkos_DynRankView.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/Kokkos_DynamicView.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/Kokkos_ErrorReporter.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/Kokkos_Functional.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/Kokkos_OffsetView.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/Kokkos_ScatterView.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/Kokkos_StaticCrsGraph.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/Kokkos_UnorderedMap.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/Kokkos_Vector.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/impl/Kokkos_Bitset_impl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/impl/Kokkos_Functional_impl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/impl/Kokkos_StaticCrsGraph_factory.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/impl/Kokkos_UnorderedMap_impl.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/containers/src/impl/Kokkos_UnorderedMap_impl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_CudaSpace.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Alloc.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Atomic_Intrinsics.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Atomic_Intrinsics_Restore_Builtins.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Error.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_GraphNodeKernel.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_GraphNode_Impl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Graph_Impl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Half_Conversion.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Half_Impl_Type.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Instance.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Instance.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_KernelLaunch.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Locks.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Locks.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_MDRangePolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Parallel_MDRange.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Parallel_Range.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Parallel_Team.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_ReduceScan.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Task.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Task.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Team.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_UniqueToken.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Vectorization.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_View.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_WorkGraphPolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_abort.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Abort.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Atomic.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_BlockSize_Deduction.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Error.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Half_Conversion.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Half_Impl_Type.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Instance.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Instance.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_KernelLaunch.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Locks.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Locks.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_MDRangePolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Parallel_MDRange.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Parallel_Range.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Parallel_Team.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_ReduceScan.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Shuffle_Reduce.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Space.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Team.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_UniqueToken.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Vectorization.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_WorkGraphPolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HPX/Kokkos_HPX.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HPX/Kokkos_HPX_Task.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HPX/Kokkos_HPX_Task.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/HPX/Kokkos_HPX_WorkGraphPolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/KokkosCore_Config_DeclareBackend.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/KokkosCore_Config_FwdBackend.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/KokkosCore_Config_PostInclude.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/KokkosCore_Config_SetupBackend.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/KokkosCore_config.h [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/KokkosExp_InterOp.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/KokkosExp_MDRangePolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_AcquireUniqueTokenImpl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_AnonymousSpace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Array.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Atomic.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Atomics_Desul_Config.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Atomics_Desul_Volatile_Wrapper.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Atomics_Desul_Wrapper.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Complex.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Concepts.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_CopyViews.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Core.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Core_fwd.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Crs.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Cuda.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_CudaSpace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_DetectionIdiom.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_ExecPolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Extents.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Future.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Graph.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_GraphNode.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Graph_fwd.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_HBWSpace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_HIP.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_HIP_Space.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_HPX.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Half.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_HostSpace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Layout.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_LogicalSpaces.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Macros.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_MasterLock.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_MathematicalConstants.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_MathematicalFunctions.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_MathematicalSpecialFunctions.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_MemoryPool.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_MemoryTraits.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_MinMaxClamp.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_NumericTraits.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_OpenMP.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_OpenMPTarget.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_OpenMPTargetSpace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Pair.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Parallel.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Parallel_Reduce.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_PointerOwnership.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Profiling_ProfileSection.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Rank.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_SYCL.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_SYCL_Space.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_ScratchSpace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Serial.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_TaskPolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_TaskScheduler.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_TaskScheduler_fwd.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Threads.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Timer.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Tuners.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_UniqueToken.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_Vectorization.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_View.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_WorkGraphPolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Kokkos_hwloc.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACCSpace.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACCSpace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC_Instance.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC_Instance.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC_Traits.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Instance.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Instance.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Parallel.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Task.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Task.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Team.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_WorkGraphPolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTargetSpace.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Abort.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Error.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Exec.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Exec.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Instance.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Instance.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Parallel.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Parallel_MDRange.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Task.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Task.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_UniqueToken.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Abort.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_DeepCopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Half_Conversion.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Half_Impl_Type.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Instance.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Instance.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_MDRangePolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Parallel_Range.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Parallel_Reduce.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Parallel_Scan.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Parallel_Team.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Space.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Team.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_UniqueToken.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Parallel_MDRange.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Parallel_Range.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Parallel_Team.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Task.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Task.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_UniqueToken.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_WorkGraphPolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Threads/Kokkos_ThreadsExec.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Threads/Kokkos_ThreadsExec.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Threads/Kokkos_ThreadsTeam.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_Parallel_MDRange.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_Parallel_Range.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_Parallel_Team.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_UniqueToken.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_WorkGraphPolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/View/Hooks/Kokkos_ViewHooks.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_CUDA.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_HBWSpace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_HIP.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_HPX.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_OPENACC.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_OPENMP.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_OPENMPTARGET.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_SERIAL.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_SYCL.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_THREADS.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/dummy.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_CUDA.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_HBWSpace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_HIP.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_HPX.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_OPENACC.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_OPENMP.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_OPENMPTARGET.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_SERIAL.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_SYCL.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_THREADS.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/KokkosExp_Host_IterateTile.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/KokkosExp_IterateTileGPU.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/KokkosExp_ViewMapping.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_AnalyzePolicy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Assembly.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Compare_Exchange_Strong.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Compare_Exchange_Weak.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Decrement.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Exchange.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Fetch_Add.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Fetch_And.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Fetch_Or.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Fetch_Sub.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Generic.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Generic_Secondary.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Increment.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Load.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Memory_Order.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_MinMax.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Store.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_View.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Windows.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_BitOps.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_CPUDiscovery.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_CPUDiscovery.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_ChaseLev.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_ClockTic.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Combined_Reducer.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Command_Line_Parsing.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Command_Line_Parsing.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_ConcurrentBitset.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Core.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Default_GraphNodeKernel.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Default_GraphNode_Impl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Default_Graph_Impl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Default_Graph_fwd.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_DeviceManagement.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_EBO.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Error.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Error.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_ExecPolicy.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_ExecSpaceManager.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_FixedBufferMemoryPool.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_FunctorAnalysis.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphImpl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphImpl_Utilities.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphImpl_fwd.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphNodeCustomization.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphNodeImpl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_HBWSpace.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostBarrier.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostBarrier.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostSharedPtr.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostSpace.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostSpace_deepcopy.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostSpace_deepcopy.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostThreadTeam.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostThreadTeam.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_InitializationSettings.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_LIFO.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_LinkedListNode.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_MemoryPool.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_MemoryPoolAllocator.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_MemorySpace.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_MemorySpace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Memory_Fence.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_MultipleTaskQueue.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_NumericTraits.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_OptionalRef.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_ParseCommandLineArgumentsAndEnvironmentVariables.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_PhysicalLayout.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling_C_Interface.h [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling_DeviceInfo.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling_Interface.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_QuadPrecisionMath.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_SharedAlloc.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_SharedAlloc.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_SharedAlloc_timpl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_SimpleTaskScheduler.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_SingleTaskQueue.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Spinwait.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Spinwait.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Stacktrace.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Stacktrace.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_StringManipulation.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskBase.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskNode.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskPolicyData.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueue.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueueCommon.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueueMemoryManager.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueueMultiple.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueueMultiple_impl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueue_impl.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskResult.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskTeamMember.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Timer.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Tools.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Tools_Generic.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Traits.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Utilities.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_VLAEmulation.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewArray.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewCtor.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewLayoutTiled.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewMapping.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewTracker.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewUniformType.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_Volatile_Load.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/impl/Kokkos_hwloc.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/setup/Kokkos_Setup_Cuda.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/setup/Kokkos_Setup_HIP.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/setup/Kokkos_Setup_SYCL.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/traits/Kokkos_ExecutionSpaceTrait.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/traits/Kokkos_GraphKernelTrait.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/traits/Kokkos_IndexTypeTrait.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/traits/Kokkos_IterationPatternTrait.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/traits/Kokkos_LaunchBoundsTrait.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/traits/Kokkos_OccupancyControlTrait.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/traits/Kokkos_PolicyTraitAdaptor.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/traits/Kokkos_PolicyTraitMatcher.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/traits/Kokkos_ScheduleTrait.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/traits/Kokkos_Traits_fwd.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/traits/Kokkos_WorkItemPropertyTrait.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/core/src/traits/Kokkos_WorkTagTrait.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/simd/cmake/Dependencies.cmake [new file with mode: 0644]
bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD_AVX512.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD_Common.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD_Scalar.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD_dummy.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/.clang-format [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Atomic_Ref.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/CUDA.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Common.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_CUDA.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_GCC.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_HIP.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_MSVC.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_OpenMP.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_SYCL.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_ScopeCaller.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_Serial.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/GCC.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Generic.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/HIP.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Lock_Array.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Lock_Array_Cuda.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Lock_Array_HIP.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Macros.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/OpenMP.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/SYCL.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/SYCLConversions.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/CUDA_asm.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/CUDA_asm_exchange.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm.inc [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc_forceglobal [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc_generic [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc_isglobal [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc_predicate [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc_forceglobal [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc_generic [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc_isglobal [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc_predicate [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_exchange.inc [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_exchange_memorder.inc [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_exchange_op.inc [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_memorder.inc [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/openmp/OpenMP_40.hpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/openmp/OpenMP_40_op.inc [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/src/Lock_Array_CUDA.cpp [new file with mode: 0644]
bundled/kokkos-3.7.00/tpls/desul/src/Lock_Array_HIP.cpp [new file with mode: 0644]
bundled/setup_bundled.cmake
cmake/configure/configure_20_trilinos.cmake
cmake/configure/configure_30_kokkos.cmake
cmake/modules/FindKOKKOS.cmake
doc/developers/packaging.html
doc/news/changes/major/20221026Arndt [new file with mode: 0644]
doc/readme.html
doc/users/cmake_dealii.html
include/deal.II/base/config.h.in
tests/quick_tests/CMakeLists.txt
tests/quick_tests/kokkos.cc [new file with mode: 0644]
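
The new quick test tests/quick_tests/kokkos.cc is listed above but its
contents are not reproduced in this excerpt. A smoke test of this kind
typically just initializes Kokkos, runs a small parallel reduction, and
checks the result. The following sketch is hypothetical and assumes only
the public Kokkos core API, not the actual contents of the file:

    // Hypothetical sketch of a Kokkos smoke test; the real
    // tests/quick_tests/kokkos.cc may look different.
    #include <Kokkos_Core.hpp>

    #include <cstdio>

    int main(int argc, char *argv[])
    {
      Kokkos::initialize(argc, argv);
      {
        long sum = 0;
        // Sum the integers 0..999 on the default execution space.
        Kokkos::parallel_reduce(
          1000,
          KOKKOS_LAMBDA(const int i, long &partial) { partial += i; },
          sum);
        std::printf("sum = %ld (expected 499500)\n", sum);
        if (sum != 499500)
          return 1;
      }
      Kokkos::finalize();
      return 0;
    }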

diff --git a/bundled/CMakeLists.txt b/bundled/CMakeLists.txt
index 64ef8f272c400e41ef178df34cb164f42be93f05..a1e00489ced7d46fae82f01691137c23d17006af 100644 (file)
@@ -37,6 +37,16 @@ IF(FEATURE_BOOST_BUNDLED_CONFIGURED)
 ENDIF()
 
 
+IF(FEATURE_KOKKOS_BUNDLED_CONFIGURED)
+  ADD_SUBDIRECTORY(${KOKKOS_FOLDER})
+
+  INSTALL(DIRECTORY ${KOKKOS_BUNDLED_INCLUDE_DIRS}
+    DESTINATION ${DEAL_II_INCLUDE_RELDIR}/deal.II/bundled
+    COMPONENT library
+    )
+ENDIF()
+
+
 IF(FEATURE_TASKFLOW_BUNDLED_CONFIGURED)
   INSTALL(DIRECTORY ${TASKFLOW_FOLDER}/include/taskflow
     DESTINATION ${DEAL_II_INCLUDE_RELDIR}/deal.II/bundled
diff --git a/bundled/README.md b/bundled/README.md
index 2cd62d722289f7058994c6a2d4dadc85d48a79a7..1eaa660a51f1a948893eb40670cdc4e5944a6109 100644 (file)
@@ -19,6 +19,15 @@ and licensed under the Boost Software License Version 1.0. See
 A full version of the library can be downloaded at http://www.boost.org/.
 
 
+kokkos-*
+--------
+
+Contains the Kokkos project licensed under the 3-clause BSD license.
+
+A full version of the Kokkos project can be downloaded at
+https://github.com/kokkos/kokkos.
+
+
 taskflow-*
 --------------
 
diff --git a/bundled/kokkos-3.7.00/CMakeLists.txt b/bundled/kokkos-3.7.00/CMakeLists.txt
new file mode 100644 (file)
index 0000000..f0556a9
--- /dev/null
@@ -0,0 +1,73 @@
+## ---------------------------------------------------------------------
+##
+## Copyright (C) 2022 by the deal.II authors
+##
+## This file is part of the deal.II library.
+##
+## The deal.II library is free software; you can use it, redistribute
+## it, and/or modify it under the terms of the GNU Lesser General
+## Public License as published by the Free Software Foundation; either
+## version 2.1 of the License, or (at your option) any later version.
+## The full text of the license can be found in the file LICENSE.md at
+## the top level directory of deal.II.
+##
+## ---------------------------------------------------------------------
+
+SET(src_kokkos
+  #tpls/desul/src/Lock_Array_CUDA.cpp
+  #tpls/desul/src/Lock_Array_HIP.cpp
+  core/src/impl/Kokkos_Profiling.cpp
+  core/src/impl/Kokkos_NumericTraits.cpp
+  core/src/impl/Kokkos_HostSpace_deepcopy.cpp
+  core/src/impl/Kokkos_Error.cpp
+  core/src/impl/Kokkos_Spinwait.cpp
+  core/src/impl/Kokkos_MemoryPool.cpp
+  core/src/impl/Kokkos_HostThreadTeam.cpp
+  core/src/impl/Kokkos_hwloc.cpp
+  core/src/impl/Kokkos_MemorySpace.cpp
+  core/src/impl/Kokkos_HBWSpace.cpp
+  core/src/impl/Kokkos_HostBarrier.cpp
+  core/src/impl/Kokkos_Core.cpp
+  core/src/impl/Kokkos_HostSpace.cpp
+  core/src/impl/Kokkos_CPUDiscovery.cpp
+  core/src/impl/Kokkos_Stacktrace.cpp
+  core/src/impl/Kokkos_SharedAlloc.cpp
+  core/src/impl/Kokkos_Command_Line_Parsing.cpp
+  core/src/impl/Kokkos_ExecPolicy.cpp
+  #core/src/OpenMPTarget/Kokkos_OpenMPTargetSpace.cpp
+  #core/src/OpenMPTarget/Kokkos_OpenMPTarget_Task.cpp
+  #core/src/OpenMPTarget/Kokkos_OpenMPTarget_Instance.cpp
+  #core/src/OpenMPTarget/Kokkos_OpenMPTarget_Exec.cpp
+  #core/src/OpenACC/Kokkos_OpenACCSpace.cpp
+  #core/src/OpenACC/Kokkos_OpenACC.cpp
+  #core/src/OpenACC/Kokkos_OpenACC_Instance.cpp
+  #core/src/Cuda/Kokkos_Cuda_Instance.cpp
+  #core/src/Cuda/Kokkos_Cuda_Task.cpp
+  #core/src/Cuda/Kokkos_CudaSpace.cpp
+  #core/src/Cuda/Kokkos_Cuda_Locks.cpp
+  #core/src/HPX/Kokkos_HPX_Task.cpp
+  #core/src/HPX/Kokkos_HPX.cpp
+  core/src/dummy.cpp
+  #core/src/Threads/Kokkos_ThreadsExec.cpp
+  #core/src/HIP/Kokkos_HIP_Instance.cpp
+  #core/src/HIP/Kokkos_HIP_Space.cpp
+  #core/src/HIP/Kokkos_HIP_Locks.cpp
+  #core/src/SYCL/Kokkos_SYCL_Space.cpp
+  #core/src/SYCL/Kokkos_SYCL.cpp
+  #core/src/SYCL/Kokkos_SYCL_Instance.cpp
+  core/src/Serial/Kokkos_Serial.cpp
+  core/src/Serial/Kokkos_Serial_Task.cpp
+  #core/src/OpenMP/Kokkos_OpenMP_Instance.cpp
+  #core/src/OpenMP/Kokkos_OpenMP_Task.cpp
+  algorithms/src/KokkosAlgorithms_dummy.cpp
+  simd/src/Kokkos_SIMD_dummy.cpp
+  containers/src/impl/Kokkos_UnorderedMap_impl.cpp
+)
+INCLUDE_DIRECTORIES(${KOKKOS_BUNDLED_INCLUDE_DIRS})
+
+ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS -Wno-float-conversion)
+ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS -Wno-missing-field-initializers)
+ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS -Wno-suggest-override)
+ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS -Wno-unused-but-set-parameter)
+
+DEAL_II_ADD_LIBRARY(obj_kokkos OBJECT ${src_kokkos})
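
Note that the source list above compiles only the host sources: all device
and threading backends (CUDA, HIP, SYCL, OpenMP, OpenMPTarget, OpenACC,
Threads, HPX) are commented out, so the bundled Kokkos provides only the
Serial execution space. A minimal sketch of the kind of host-only code
such a build supports, assuming nothing beyond the public Kokkos core API:

    // Host-only Kokkos usage; with only the Serial backend enabled,
    // Kokkos::Serial is also the default execution space.
    #include <Kokkos_Core.hpp>

    int main(int argc, char *argv[])
    {
      Kokkos::initialize(argc, argv);
      {
        // A one-dimensional array of 100 doubles in host memory.
        Kokkos::View<double *, Kokkos::HostSpace> x("x", 100);

        // Fill it with an explicitly Serial parallel_for.
        Kokkos::parallel_for(
          Kokkos::RangePolicy<Kokkos::Serial>(0, x.extent(0)),
          KOKKOS_LAMBDA(const int i) { x(i) = 2.0 * i; });
      }
      Kokkos::finalize();
      return 0;
    }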
diff --git a/bundled/kokkos-3.7.00/Copyright.txt b/bundled/kokkos-3.7.00/Copyright.txt
new file mode 100644 (file)
index 0000000..5e2f8d8
--- /dev/null
@@ -0,0 +1,41 @@
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
diff --git a/bundled/kokkos-3.7.00/LICENSE b/bundled/kokkos-3.7.00/LICENSE
new file mode 100644 (file)
index 0000000..c6f1708
--- /dev/null
@@ -0,0 +1,43 @@
+//@HEADER
+// ************************************************************************
+// 
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+// 
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Kokkos is licensed under 3-clause BSD terms of use:
+// 
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+// 
+// ************************************************************************
+//@HEADER
diff --git a/bundled/kokkos-3.7.00/algorithms/src/KokkosAlgorithms_dummy.cpp b/bundled/kokkos-3.7.00/algorithms/src/KokkosAlgorithms_dummy.cpp
new file mode 100644 (file)
index 0000000..9c08a08
--- /dev/null
@@ -0,0 +1 @@
+void KOKKOS_ALGORITHMS_SRC_DUMMY_PREVENT_LINK_ERROR() {}
diff --git a/bundled/kokkos-3.7.00/algorithms/src/Kokkos_Random.hpp b/bundled/kokkos-3.7.00/algorithms/src/Kokkos_Random.hpp
new file mode 100644 (file)
index 0000000..1d85ffd
--- /dev/null
@@ -0,0 +1,1601 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_RANDOM_HPP
+#define KOKKOS_RANDOM_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_RANDOM
+#endif
+
+#include <Kokkos_Core.hpp>
+#include <Kokkos_Complex.hpp>
+#include <cstdio>
+#include <cstdlib>
+#include <cmath>
+
+/// \file Kokkos_Random.hpp
+/// \brief Pseudorandom number generators
+///
+/// These generators are based on Vigna, Sebastiano (2014). "An
+/// experimental exploration of Marsaglia's xorshift generators,
+/// scrambled."  See: http://arxiv.org/abs/1402.6246
+
+namespace Kokkos {
+
+// clang-format off
+  /* Template functions to get equidistributed random numbers from a generator for a specific Scalar type
+
+       template<class Generator, class Scalar>
+       struct rand{
+
+         //Max value returned by draw(Generator& gen)
+         KOKKOS_INLINE_FUNCTION
+         static Scalar max();
+
+         //Returns a value between zero and max()
+         KOKKOS_INLINE_FUNCTION
+         static Scalar draw(Generator& gen);
+
+         //Returns a value between zero and range()
+         //Note: for floating point values range can be larger than max()
+         KOKKOS_INLINE_FUNCTION
+         static Scalar draw(Generator& gen, const Scalar& range);
+
+         //Returns a value between start and end
+         KOKKOS_INLINE_FUNCTION
+         static Scalar draw(Generator& gen, const Scalar& start, const Scalar& end);
+      };
+
+    The random number generators themselves have two components: a state pool and the actual generator.
+    A state pool manages a number of generators, so that each active thread is able to grab its own.
+    This allows the generation of random numbers which are independent between threads. Note that,
+    in contrast to cuRAND, none of the functions of the pool (or the generator) are collectives,
+    i.e. all functions can be called inside conditionals.
+
+    template<class Device>
+    class Pool {
+     public:
+      //The Kokkos device type
+      using device_type = Device;
+      //The actual generator type
+      using generator_type = Generator<Device>;
+
+      //Default constructor: does not initialize a pool
+      Pool();
+
+      //Initializing constructor: calls init(seed,Device_Specific_Number);
+      Pool(unsigned int seed);
+
+      //Initialize the pool with seed as the starting seed and a pool size of num_states
+      //The Random_XorShift64 generator is used in serial to initialize all states,
+      //thus the initialization process is platform independent and deterministic.
+      void init(unsigned int seed, int num_states);
+
+      //Get a generator. This will lock one of the states, guaranteeing that each thread
+      //will have its private generator. Note: on Cuda getting a state involves atomics,
+      //and is thus not deterministic!
+      generator_type get_state();
+
+      //Give a state back to the pool. This unlocks the state, and writes the modified
+      //state of the generator back to the pool.
+      void free_state(generator_type gen);
+
+    };
+
+    template<class Device>
+    class Generator {
+     public:
+     //The Kokkos device type
+    using device_type = Device;
+
+    //Max return values of respective [X]rand[S]() functions
+    enum {MAX_URAND = 0xffffffffU};
+    enum {MAX_URAND64 = 0xffffffffffffffffULL-1};
+    enum {MAX_RAND = static_cast<int>(0xffffffffU/2)};
+    enum {MAX_RAND64 = static_cast<int64_t>(0xffffffffffffffffULL/2-1)};
+
+
+    //Initialize with a state and its index within the pool. Note: in serial the
+    //Generator can be used by just giving it the necessary state arguments.
+    KOKKOS_INLINE_FUNCTION
+    Generator (STATE_ARGUMENTS, int state_idx = 0);
+
+    //Draw an equidistributed uint32_t in the range [0,MAX_URAND)
+    KOKKOS_INLINE_FUNCTION
+    uint32_t urand();
+
+    //Draw an equidistributed uint64_t in the range [0,MAX_URAND64)
+    KOKKOS_INLINE_FUNCTION
+    uint64_t urand64();
+
+    //Draw an equidistributed uint32_t in the range [0,range)
+    KOKKOS_INLINE_FUNCTION
+    uint32_t urand(const uint32_t& range);
+
+    //Draw an equidistributed uint32_t in the range [start,end)
+    KOKKOS_INLINE_FUNCTION
+    uint32_t urand(const uint32_t& start, const uint32_t& end );
+
+    //Draw an equidistributed uint64_t in the range [0,range)
+    KOKKOS_INLINE_FUNCTION
+    uint64_t urand64(const uint64_t& range);
+
+    //Draw an equidistributed uint64_t in the range [start,end)
+    KOKKOS_INLINE_FUNCTION
+    uint64_t urand64(const uint64_t& start, const uint64_t& end );
+
+    //Draw an equidistributed int in the range [0,MAX_RAND)
+    KOKKOS_INLINE_FUNCTION
+    int rand();
+
+    //Draw an equidistributed int in the range [0,range)
+    KOKKOS_INLINE_FUNCTION
+    int rand(const int& range);
+
+    //Draw an equidistributed int in the range [start,end)
+    KOKKOS_INLINE_FUNCTION
+    int rand(const int& start, const int& end );
+
+    //Draw an equidistributed int64_t in the range [0,MAX_RAND64)
+    KOKKOS_INLINE_FUNCTION
+    int64_t rand64();
+
+    //Draw an equidistributed int64_t in the range [0,range)
+    KOKKOS_INLINE_FUNCTION
+    int64_t rand64(const int64_t& range);
+
+    //Draw an equidistributed int64_t in the range [start,end)
+    KOKKOS_INLINE_FUNCTION
+    int64_t rand64(const int64_t& start, const int64_t& end );
+
+    //Draw an equidistributed float in the range [0,1.0)
+    KOKKOS_INLINE_FUNCTION
+    float frand();
+
+    //Draw an equidistributed float in the range [0,range)
+    KOKKOS_INLINE_FUNCTION
+    float frand(const float& range);
+
+    //Draw an equidistributed float in the range [start,end)
+    KOKKOS_INLINE_FUNCTION
+    float frand(const float& start, const float& end );
+
+    //Draw an equidistributed double in the range [0,1.0)
+    KOKKOS_INLINE_FUNCTION
+    double drand();
+
+    //Draw an equidistributed double in the range [0,range)
+    KOKKOS_INLINE_FUNCTION
+    double drand(const double& range);
+
+    //Draw an equidistributed double in the range [start,end)
+    KOKKOS_INLINE_FUNCTION
+    double drand(const double& start, const double& end );
+
+    //Draw a standard normally distributed double
+    KOKKOS_INLINE_FUNCTION
+    double normal();
+
+    //Draw a normally distributed double with given mean and standard deviation
+    KOKKOS_INLINE_FUNCTION
+    double normal(const double& mean, const double& std_dev=1.0);
+    };
+
+    //Additional Functions:
+
+    //Fills view with random numbers in the range [0,range)
+    template<class ViewType, class PoolType>
+    void fill_random(ViewType view, PoolType pool, ViewType::value_type range);
+
+    //Fills view with random numbers in the range [start,end)
+    template<class ViewType, class PoolType>
+    void fill_random(ViewType view, PoolType pool,
+                     ViewType::value_type start, ViewType::value_type end);
+
+*/
+// clang-format on
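+
+// Example (illustrative sketch; the pool type, seed, and variable names below
+// are assumptions, not prescribed by this header): drawing typed random
+// numbers through the rand<> traits.
+//
+//   Kokkos::Random_XorShift64_Pool<> pool(/*seed=*/12345);
+//   auto gen = pool.get_state();
+//   int    i = Kokkos::rand<decltype(gen), int>::draw(gen, 0, 100);  // [0,100)
+//   double d = Kokkos::rand<decltype(gen), double>::draw(gen);       // [0,1)
+//   pool.free_state(gen);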
+
+template <class Generator, class Scalar>
+struct rand;
+
+template <class Generator>
+struct rand<Generator, char> {
+  KOKKOS_INLINE_FUNCTION
+  static short max() { return 127; }
+  KOKKOS_INLINE_FUNCTION
+  static short draw(Generator& gen) {
+    return short((gen.rand() & (0xff + 256)) % 256);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static short draw(Generator& gen, const char& range) {
+    return char(gen.rand(range));
+  }
+  KOKKOS_INLINE_FUNCTION
+  static short draw(Generator& gen, const char& start, const char& end) {
+    return char(gen.rand(start, end));
+  }
+};
+
+template <class Generator>
+struct rand<Generator, short> {
+  KOKKOS_INLINE_FUNCTION
+  static short max() { return 32767; }
+  KOKKOS_INLINE_FUNCTION
+  static short draw(Generator& gen) {
+    return short((gen.rand() & (0xffff + 65536)) % 32768);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static short draw(Generator& gen, const short& range) {
+    return short(gen.rand(range));
+  }
+  KOKKOS_INLINE_FUNCTION
+  static short draw(Generator& gen, const short& start, const short& end) {
+    return short(gen.rand(start, end));
+  }
+};
+
+template <class Generator>
+struct rand<Generator, int> {
+  KOKKOS_INLINE_FUNCTION
+  static int max() { return Generator::MAX_RAND; }
+  KOKKOS_INLINE_FUNCTION
+  static int draw(Generator& gen) { return gen.rand(); }
+  KOKKOS_INLINE_FUNCTION
+  static int draw(Generator& gen, const int& range) { return gen.rand(range); }
+  KOKKOS_INLINE_FUNCTION
+  static int draw(Generator& gen, const int& start, const int& end) {
+    return gen.rand(start, end);
+  }
+};
+
+template <class Generator>
+struct rand<Generator, unsigned int> {
+  KOKKOS_INLINE_FUNCTION
+  static unsigned int max() { return Generator::MAX_URAND; }
+  KOKKOS_INLINE_FUNCTION
+  static unsigned int draw(Generator& gen) { return gen.urand(); }
+  KOKKOS_INLINE_FUNCTION
+  static unsigned int draw(Generator& gen, const unsigned int& range) {
+    return gen.urand(range);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static unsigned int draw(Generator& gen, const unsigned int& start,
+                           const unsigned int& end) {
+    return gen.urand(start, end);
+  }
+};
+
+template <class Generator>
+struct rand<Generator, long> {
+  KOKKOS_INLINE_FUNCTION
+  static long max() {
+    // FIXME (mfh 26 Oct 2014) It would be better to select the
+    // return value at compile time, using something like enable_if.
+    return sizeof(long) == 4 ? static_cast<long>(Generator::MAX_RAND)
+                             : static_cast<long>(Generator::MAX_RAND64);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static long draw(Generator& gen) {
+    // FIXME (mfh 26 Oct 2014) It would be better to select the
+    // return value at compile time, using something like enable_if.
+    return sizeof(long) == 4 ? static_cast<long>(gen.rand())
+                             : static_cast<long>(gen.rand64());
+  }
+  KOKKOS_INLINE_FUNCTION
+  static long draw(Generator& gen, const long& range) {
+    // FIXME (mfh 26 Oct 2014) It would be better to select the
+    // return value at compile time, using something like enable_if.
+    return sizeof(long) == 4
+               ? static_cast<long>(gen.rand(static_cast<int>(range)))
+               : static_cast<long>(gen.rand64(range));
+  }
+  KOKKOS_INLINE_FUNCTION
+  static long draw(Generator& gen, const long& start, const long& end) {
+    // FIXME (mfh 26 Oct 2014) It would be better to select the
+    // return value at compile time, using something like enable_if.
+    return sizeof(long) == 4
+               ? static_cast<long>(
+                     gen.rand(static_cast<int>(start), static_cast<int>(end)))
+               : static_cast<long>(gen.rand64(start, end));
+  }
+};
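+
+// A sketch of the compile-time dispatch the FIXME comments above ask for,
+// assuming C++17 if constexpr (illustration only, not what this header does):
+//
+//   KOKKOS_INLINE_FUNCTION static long draw(Generator& gen) {
+//     if constexpr (sizeof(long) == 4)
+//       return static_cast<long>(gen.rand());
+//     else
+//       return static_cast<long>(gen.rand64());
+//   }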
+
+template <class Generator>
+struct rand<Generator, unsigned long> {
+  KOKKOS_INLINE_FUNCTION
+  static unsigned long max() {
+    // FIXME (mfh 26 Oct 2014) It would be better to select the
+    // return value at compile time, using something like enable_if.
+    return sizeof(unsigned long) == 4
+               ? static_cast<unsigned long>(Generator::MAX_URAND)
+               : static_cast<unsigned long>(Generator::MAX_URAND64);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static unsigned long draw(Generator& gen) {
+    // FIXME (mfh 26 Oct 2014) It would be better to select the
+    // return value at compile time, using something like enable_if.
+    return sizeof(unsigned long) == 4
+               ? static_cast<unsigned long>(gen.urand())
+               : static_cast<unsigned long>(gen.urand64());
+  }
+  KOKKOS_INLINE_FUNCTION
+  static unsigned long draw(Generator& gen, const unsigned long& range) {
+    // FIXME (mfh 26 Oct 2014) It would be better to select the
+    // return value at compile time, using something like enable_if.
+    return sizeof(unsigned long) == 4
+               ? static_cast<unsigned long>(
+                     gen.urand(static_cast<unsigned int>(range)))
+               : static_cast<unsigned long>(gen.urand64(range));
+  }
+  KOKKOS_INLINE_FUNCTION
+  static unsigned long draw(Generator& gen, const unsigned long& start,
+                            const unsigned long& end) {
+    // FIXME (mfh 26 Oct 2014) It would be better to select the
+    // return value at compile time, using something like enable_if.
+    return sizeof(unsigned long) == 4
+               ? static_cast<unsigned long>(
+                     gen.urand(static_cast<unsigned int>(start),
+                               static_cast<unsigned int>(end)))
+               : static_cast<unsigned long>(gen.urand64(start, end));
+  }
+};
+
+// NOTE (mfh 26 Oct 2014) This is a partial specialization for long
+// long, a C99 / C++11 signed type which is guaranteed to be at
+// least 64 bits.  Do NOT write a partial specialization for
+// int64_t!!!  This is just an alias!  It could be either long or
+// long long.  We don't know which a priori, and I've seen both.
+// The types long and long long are guaranteed to differ, so it's
+// always safe to specialize for both.
+template <class Generator>
+struct rand<Generator, long long> {
+  KOKKOS_INLINE_FUNCTION
+  static long long max() {
+    // FIXME (mfh 26 Oct 2014) It's legal for long long to be > 64 bits.
+    return Generator::MAX_RAND64;
+  }
+  KOKKOS_INLINE_FUNCTION
+  static long long draw(Generator& gen) {
+    // FIXME (mfh 26 Oct 2014) It's legal for long long to be > 64 bits.
+    return gen.rand64();
+  }
+  KOKKOS_INLINE_FUNCTION
+  static long long draw(Generator& gen, const long long& range) {
+    // FIXME (mfh 26 Oct 2014) It's legal for long long to be > 64 bits.
+    return gen.rand64(range);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static long long draw(Generator& gen, const long long& start,
+                        const long long& end) {
+    // FIXME (mfh 26 Oct 2014) It's legal for long long to be > 64 bits.
+    return gen.rand64(start, end);
+  }
+};
+
+// NOTE (mfh 26 Oct 2014) This is a partial specialization for
+// unsigned long long, a C99 / C++11 unsigned type which is
+// guaranteed to be at least 64 bits.  Do NOT write a partial
+// specialization for uint64_t!!!  This is just an alias!  It could
+// be either unsigned long or unsigned long long.  We don't know
+// which a priori, and I've seen both.  The types unsigned long and
+// unsigned long long are guaranteed to differ, so it's always safe
+// to specialize for both.
+template <class Generator>
+struct rand<Generator, unsigned long long> {
+  KOKKOS_INLINE_FUNCTION
+  static unsigned long long max() {
+    // FIXME (mfh 26 Oct 2014) It's legal for unsigned long long to be > 64
+    // bits.
+    return Generator::MAX_URAND64;
+  }
+  KOKKOS_INLINE_FUNCTION
+  static unsigned long long draw(Generator& gen) {
+    // FIXME (mfh 26 Oct 2014) It's legal for unsigned long long to be > 64
+    // bits.
+    return gen.urand64();
+  }
+  KOKKOS_INLINE_FUNCTION
+  static unsigned long long draw(Generator& gen,
+                                 const unsigned long long& range) {
+    // FIXME (mfh 26 Oct 2014) It's legal for unsigned long long to be > 64
+    // bits.
+    return gen.urand64(range);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static unsigned long long draw(Generator& gen,
+                                 const unsigned long long& start,
+                                 const unsigned long long& end) {
+    // FIXME (mfh 26 Oct 2014) It's legal for unsigned long long to be > 64
+    // bits.
+    return gen.urand64(start, end);
+  }
+};
+
+#if defined(KOKKOS_HALF_T_IS_FLOAT) && !KOKKOS_HALF_T_IS_FLOAT
+template <class Generator>
+struct rand<Generator, Kokkos::Experimental::half_t> {
+  using half = Kokkos::Experimental::half_t;
+  KOKKOS_INLINE_FUNCTION
+  static half max() { return half(1.0); }
+  KOKKOS_INLINE_FUNCTION
+  static half draw(Generator& gen) { return half(gen.frand()); }
+  KOKKOS_INLINE_FUNCTION
+  static half draw(Generator& gen, const half& range) {
+    return half(gen.frand(float(range)));
+  }
+  KOKKOS_INLINE_FUNCTION
+  static half draw(Generator& gen, const half& start, const half& end) {
+    return half(gen.frand(float(start), float(end)));
+  }
+};
+#endif  // defined(KOKKOS_HALF_T_IS_FLOAT) && !KOKKOS_HALF_T_IS_FLOAT
+
+#if defined(KOKKOS_BHALF_T_IS_FLOAT) && !KOKKOS_BHALF_T_IS_FLOAT
+template <class Generator>
+struct rand<Generator, Kokkos::Experimental::bhalf_t> {
+  using bhalf = Kokkos::Experimental::bhalf_t;
+  KOKKOS_INLINE_FUNCTION
+  static bhalf max() { return bhalf(1.0); }
+  KOKKOS_INLINE_FUNCTION
+  static bhalf draw(Generator& gen) { return bhalf(gen.frand()); }
+  KOKKOS_INLINE_FUNCTION
+  static bhalf draw(Generator& gen, const bhalf& range) {
+    return bhalf(gen.frand(float(range)));
+  }
+  KOKKOS_INLINE_FUNCTION
+  static bhalf draw(Generator& gen, const bhalf& start, const bhalf& end) {
+    return bhalf(gen.frand(float(start), float(end)));
+  }
+};
+#endif  // defined(KOKKOS_BHALF_T_IS_FLOAT) && !KOKKOS_BHALF_T_IS_FLOAT
+
+template <class Generator>
+struct rand<Generator, float> {
+  KOKKOS_INLINE_FUNCTION
+  static float max() { return 1.0f; }
+  KOKKOS_INLINE_FUNCTION
+  static float draw(Generator& gen) { return gen.frand(); }
+  KOKKOS_INLINE_FUNCTION
+  static float draw(Generator& gen, const float& range) {
+    return gen.frand(range);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static float draw(Generator& gen, const float& start, const float& end) {
+    return gen.frand(start, end);
+  }
+};
+
+template <class Generator>
+struct rand<Generator, double> {
+  KOKKOS_INLINE_FUNCTION
+  static double max() { return 1.0; }
+  KOKKOS_INLINE_FUNCTION
+  static double draw(Generator& gen) { return gen.drand(); }
+  KOKKOS_INLINE_FUNCTION
+  static double draw(Generator& gen, const double& range) {
+    return gen.drand(range);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static double draw(Generator& gen, const double& start, const double& end) {
+    return gen.drand(start, end);
+  }
+};
+
+template <class Generator>
+struct rand<Generator, Kokkos::complex<float>> {
+  KOKKOS_INLINE_FUNCTION
+  static Kokkos::complex<float> max() {
+    return Kokkos::complex<float>(1.0, 1.0);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static Kokkos::complex<float> draw(Generator& gen) {
+    const float re = gen.frand();
+    const float im = gen.frand();
+    return Kokkos::complex<float>(re, im);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static Kokkos::complex<float> draw(Generator& gen,
+                                     const Kokkos::complex<float>& range) {
+    const float re = gen.frand(real(range));
+    const float im = gen.frand(imag(range));
+    return Kokkos::complex<float>(re, im);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static Kokkos::complex<float> draw(Generator& gen,
+                                     const Kokkos::complex<float>& start,
+                                     const Kokkos::complex<float>& end) {
+    const float re = gen.frand(real(start), real(end));
+    const float im = gen.frand(imag(start), imag(end));
+    return Kokkos::complex<float>(re, im);
+  }
+};
+
+template <class Generator>
+struct rand<Generator, Kokkos::complex<double>> {
+  KOKKOS_INLINE_FUNCTION
+  static Kokkos::complex<double> max() {
+    return Kokkos::complex<double>(1.0, 1.0);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static Kokkos::complex<double> draw(Generator& gen) {
+    const double re = gen.drand();
+    const double im = gen.drand();
+    return Kokkos::complex<double>(re, im);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static Kokkos::complex<double> draw(Generator& gen,
+                                      const Kokkos::complex<double>& range) {
+    const double re = gen.drand(real(range));
+    const double im = gen.drand(imag(range));
+    return Kokkos::complex<double>(re, im);
+  }
+  KOKKOS_INLINE_FUNCTION
+  static Kokkos::complex<double> draw(Generator& gen,
+                                      const Kokkos::complex<double>& start,
+                                      const Kokkos::complex<double>& end) {
+    const double re = gen.drand(real(start), real(end));
+    const double im = gen.drand(imag(start), imag(end));
+    return Kokkos::complex<double>(re, im);
+  }
+};
+
+template <class DeviceType>
+class Random_XorShift1024_Pool;
+
+namespace Impl {
+
+template <bool UseCArrayState>
+struct Random_XorShift1024_State {
+  uint64_t state_[16];
+  KOKKOS_DEFAULTED_FUNCTION
+  Random_XorShift1024_State() = default;
+
+  template <class StateViewType>
+  KOKKOS_FUNCTION Random_XorShift1024_State(const StateViewType& v,
+                                            int state_idx) {
+    for (int i = 0; i < 16; i++) state_[i] = v(state_idx, i);
+  }
+
+  KOKKOS_FUNCTION
+  uint64_t operator[](const int i) const { return state_[i]; }
+
+  KOKKOS_FUNCTION
+  uint64_t& operator[](const int i) { return state_[i]; }
+};
+
+template <>
+struct Random_XorShift1024_State<false> {
+  uint64_t* state_;
+  const int stride_;
+  KOKKOS_FUNCTION
+  Random_XorShift1024_State() : state_(nullptr), stride_(1) {}
+
+  template <class StateViewType>
+  KOKKOS_FUNCTION Random_XorShift1024_State(const StateViewType& v,
+                                            int state_idx)
+      : state_(&v(state_idx, 0)), stride_(v.stride_1()) {}
+
+  KOKKOS_FUNCTION
+  uint64_t operator[](const int i) const { return state_[i * stride_]; }
+
+  KOKKOS_FUNCTION
+  uint64_t& operator[](const int i) { return state_[i * stride_]; }
+};
+
+template <class ExecutionSpace>
+struct Random_XorShift1024_UseCArrayState : std::true_type {};
+
+#ifdef KOKKOS_ENABLE_CUDA
+template <>
+struct Random_XorShift1024_UseCArrayState<Kokkos::Cuda> : std::false_type {};
+#endif
+#ifdef KOKKOS_ENABLE_HIP
+template <>
+struct Random_XorShift1024_UseCArrayState<Kokkos::Experimental::HIP>
+    : std::false_type {};
+#endif
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+template <>
+struct Random_XorShift1024_UseCArrayState<Kokkos::Experimental::OpenMPTarget>
+    : std::false_type {};
+#endif
+
+template <class DeviceType>
+struct Random_UniqueIndex {
+  using locks_view_type = View<int**, DeviceType>;
+  KOKKOS_FUNCTION
+  static int get_state_idx(const locks_view_type) {
+    KOKKOS_IF_ON_HOST(
+        (return DeviceType::execution_space::impl_hardware_thread_id();))
+
+    KOKKOS_IF_ON_DEVICE((return 0;))
+  }
+};
+
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#define KOKKOS_IMPL_EXECUTION_SPACE_CUDA_OR_HIP Kokkos::Cuda
+#elif defined(KOKKOS_ENABLE_HIP)
+#define KOKKOS_IMPL_EXECUTION_SPACE_CUDA_OR_HIP Kokkos::Experimental::HIP
+#endif
+
+template <class MemorySpace>
+struct Random_UniqueIndex<
+    Kokkos::Device<KOKKOS_IMPL_EXECUTION_SPACE_CUDA_OR_HIP, MemorySpace>> {
+  using locks_view_type =
+      View<int**, Kokkos::Device<KOKKOS_IMPL_EXECUTION_SPACE_CUDA_OR_HIP,
+                                 MemorySpace>>;
+  KOKKOS_FUNCTION
+  static int get_state_idx(const locks_view_type& locks_) {
+    KOKKOS_IF_ON_DEVICE((
+        const int i_offset =
+            (threadIdx.x * blockDim.y + threadIdx.y) * blockDim.z + threadIdx.z;
+        int i =
+            (((blockIdx.x * gridDim.y + blockIdx.y) * gridDim.z + blockIdx.z) *
+                 blockDim.x * blockDim.y * blockDim.z +
+             i_offset) %
+            locks_.extent(0);
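+        // Linear probing over the lock array: try to acquire state i by
+        // atomically flipping locks_(i, 0) from 0 to 1; on failure advance
+        // by the block size, wrapping back to the thread's base offset.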
+        while (Kokkos::atomic_compare_exchange(&locks_(i, 0), 0, 1)) {
+          i += blockDim.x * blockDim.y * blockDim.z;
+          if (i >= static_cast<int>(locks_.extent(0))) {
+            i = i_offset;
+          }
+        }
+
+        return i;))
+    KOKKOS_IF_ON_HOST(((void)locks_; return 0;))
+  }
+};
+
+#undef KOKKOS_IMPL_EXECUTION_SPACE_CUDA_OR_HIP
+
+#endif
+
+#ifdef KOKKOS_ENABLE_SYCL
+template <class MemorySpace>
+struct Random_UniqueIndex<
+    Kokkos::Device<Kokkos::Experimental::SYCL, MemorySpace>> {
+  using locks_view_type =
+      View<int**, Kokkos::Device<Kokkos::Experimental::SYCL, MemorySpace>>;
+  KOKKOS_FUNCTION
+  static int get_state_idx(const locks_view_type& locks_) {
+    auto item = sycl::ext::oneapi::experimental::this_nd_item<3>();
+    std::size_t threadIdx[3] = {item.get_local_id(2), item.get_local_id(1),
+                                item.get_local_id(0)};
+    std::size_t blockIdx[3]  = {item.get_group(2), item.get_group(1),
+                               item.get_group(0)};
+    std::size_t blockDim[3] = {item.get_local_range(2), item.get_local_range(1),
+                               item.get_local_range(0)};
+    std::size_t gridDim[3]  = {
+        item.get_global_range(2) / item.get_local_range(2),
+        item.get_global_range(1) / item.get_local_range(1),
+        item.get_global_range(0) / item.get_local_range(0)};
+    const int i_offset =
+        (threadIdx[0] * blockDim[1] + threadIdx[1]) * blockDim[2] +
+        threadIdx[2];
+    int i =
+        (((blockIdx[0] * gridDim[1] + blockIdx[1]) * gridDim[2] + blockIdx[2]) *
+             blockDim[0] * blockDim[1] * blockDim[2] +
+         i_offset) %
+        locks_.extent(0);
+    while (Kokkos::atomic_compare_exchange(&locks_(i, 0), 0, 1)) {
+      i += blockDim[0] * blockDim[1] * blockDim[2];
+      if (i >= static_cast<int>(locks_.extent(0))) {
+        i = i_offset;
+      }
+    }
+    return i;
+  }
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+template <class MemorySpace>
+struct Random_UniqueIndex<
+    Kokkos::Device<Kokkos::Experimental::OpenMPTarget, MemorySpace>> {
+  using locks_view_type =
+      View<int**,
+           Kokkos::Device<Kokkos::Experimental::OpenMPTarget, MemorySpace>>;
+  KOKKOS_FUNCTION
+  static int get_state_idx(const locks_view_type& locks) {
+    const int team_size = omp_get_num_threads();
+    int i               = omp_get_team_num() * team_size + omp_get_thread_num();
+    const int lock_size = locks.extent_int(0);
+
+    while (Kokkos::atomic_compare_exchange(&locks(i, 0), 0, 1)) {
+      i = (i + 1) % lock_size;
+    }
+    return i;
+  }
+};
+#endif
+
+}  // namespace Impl
+
+template <class DeviceType>
+class Random_XorShift64_Pool;
+
+template <class DeviceType>
+class Random_XorShift64 {
+ private:
+  uint64_t state_;
+  const int state_idx_;
+  friend class Random_XorShift64_Pool<DeviceType>;
+
+ public:
+  using device_type = DeviceType;
+
+  constexpr static uint32_t MAX_URAND   = std::numeric_limits<uint32_t>::max();
+  constexpr static uint64_t MAX_URAND64 = std::numeric_limits<uint64_t>::max();
+  constexpr static int32_t MAX_RAND     = std::numeric_limits<int32_t>::max();
+  constexpr static int64_t MAX_RAND64   = std::numeric_limits<int64_t>::max();
+
+  KOKKOS_INLINE_FUNCTION
+  Random_XorShift64(uint64_t state, int state_idx = 0)
+      : state_(state == 0 ? uint64_t(1318319) : state), state_idx_(state_idx) {}
+
+  KOKKOS_INLINE_FUNCTION
+  uint32_t urand() {
+    state_ ^= state_ >> 12;
+    state_ ^= state_ << 25;
+    state_ ^= state_ >> 27;
+
+    uint64_t tmp = state_ * 2685821657736338717ULL;
+    tmp          = tmp >> 16;
+    return static_cast<uint32_t>(tmp & MAX_URAND);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  uint64_t urand64() {
+    state_ ^= state_ >> 12;
+    state_ ^= state_ << 25;
+    state_ ^= state_ >> 27;
+    return (state_ * 2685821657736338717ULL) - 1;
+  }
+
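+  // Rejection sampling: redraw values at or above the largest multiple of
+  // `range` so that the final modulo introduces no bias.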
+  KOKKOS_INLINE_FUNCTION
+  uint32_t urand(const uint32_t& range) {
+    const uint32_t max_val = (MAX_URAND / range) * range;
+    uint32_t tmp           = urand();
+    while (tmp >= max_val) tmp = urand();
+    return tmp % range;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  uint32_t urand(const uint32_t& start, const uint32_t& end) {
+    return urand(end - start) + start;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  uint64_t urand64(const uint64_t& range) {
+    const uint64_t max_val = (MAX_URAND64 / range) * range;
+    uint64_t tmp           = urand64();
+    while (tmp >= max_val) tmp = urand64();
+    return tmp % range;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  uint64_t urand64(const uint64_t& start, const uint64_t& end) {
+    return urand64(end - start) + start;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int rand() { return static_cast<int>(urand() / 2); }
+
+  KOKKOS_INLINE_FUNCTION
+  int rand(const int& range) {
+    const int max_val = (MAX_RAND / range) * range;
+    int tmp           = rand();
+    while (tmp >= max_val) tmp = rand();
+    return tmp % range;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int rand(const int& start, const int& end) {
+    return rand(end - start) + start;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int64_t rand64() { return static_cast<int64_t>(urand64() / 2); }
+
+  KOKKOS_INLINE_FUNCTION
+  int64_t rand64(const int64_t& range) {
+    const int64_t max_val = (MAX_RAND64 / range) * range;
+    int64_t tmp           = rand64();
+    while (tmp >= max_val) tmp = rand64();
+    return tmp % range;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int64_t rand64(const int64_t& start, const int64_t& end) {
+    return rand64(end - start) + start;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  float frand() { return urand64() / static_cast<float>(MAX_URAND64); }
+
+  KOKKOS_INLINE_FUNCTION
+  float frand(const float& range) {
+    return range * urand64() / static_cast<float>(MAX_URAND64);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  float frand(const float& start, const float& end) {
+    return frand(end - start) + start;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  double drand() { return urand64() / static_cast<double>(MAX_URAND64); }
+
+  KOKKOS_INLINE_FUNCTION
+  double drand(const double& range) {
+    return range * urand64() / static_cast<double>(MAX_URAND64);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  double drand(const double& start, const double& end) {
+    return drand(end - start) + start;
+  }
+
+  // Marsaglia polar method for drawing a standard normal distributed random
+  // number
+  KOKKOS_INLINE_FUNCTION
+  double normal() {
+    double S = 2.0;
+    double U;
+    while (S >= 1.0) {
+      U              = 2.0 * drand() - 1.0;
+      const double V = 2.0 * drand() - 1.0;
+      S              = U * U + V * V;
+    }
+    return U * std::sqrt(-2.0 * std::log(S) / S);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  double normal(const double& mean, const double& std_dev = 1.0) {
+    return mean + normal() * std_dev;
+  }
+};
+
+template <class DeviceType = Kokkos::DefaultExecutionSpace>
+class Random_XorShift64_Pool {
+ public:
+  using device_type = typename DeviceType::device_type;
+
+ private:
+  using execution_space = typename device_type::execution_space;
+  using locks_type      = View<int**, device_type>;
+  using state_data_type = View<uint64_t**, device_type>;
+  locks_type locks_;
+  state_data_type state_;
+  int num_states_;
+  int padding_;
+
+ public:
+  using generator_type = Random_XorShift64<DeviceType>;
+
+  KOKKOS_INLINE_FUNCTION
+  Random_XorShift64_Pool() {
+    num_states_ = 0;
+    padding_    = 0;
+  }
+  Random_XorShift64_Pool(uint64_t seed) {
+    num_states_ = 0;
+
+    init(seed, execution_space().concurrency());
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  Random_XorShift64_Pool(const Random_XorShift64_Pool& src)
+      : locks_(src.locks_),
+        state_(src.state_),
+        num_states_(src.num_states_),
+        padding_(src.padding_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  Random_XorShift64_Pool operator=(const Random_XorShift64_Pool& src) {
+    locks_      = src.locks_;
+    state_      = src.state_;
+    num_states_ = src.num_states_;
+    padding_    = src.padding_;
+    return *this;
+  }
+
+  void init(uint64_t seed, int num_states) {
+    if (seed == 0) seed = uint64_t(1318319);
+    // Pad only on CPU-like architectures (fewer than 1000 threads). 64 is a
+    // magic number: just something not too large and not too small; 64
+    // sounded fine.
+    padding_    = num_states < 1000 ? 64 : 1;
+    num_states_ = num_states;
+
+    locks_ =
+        locks_type("Kokkos::Random_XorShift64::locks", num_states, padding_);
+    state_ = state_data_type("Kokkos::Random_XorShift64::state", num_states_,
+                             padding_);
+
+    typename state_data_type::HostMirror h_state =
+        Kokkos::create_mirror_view(Kokkos::WithoutInitializing, state_);
+    typename locks_type::HostMirror h_lock =
+        Kokkos::create_mirror_view(Kokkos::WithoutInitializing, locks_);
+
+    // Execute on the HostMirror's default execution space.
+    Random_XorShift64<typename state_data_type::HostMirror::execution_space>
+        gen(seed, 0);
+    for (int i = 0; i < 17; i++) gen.rand();
+    for (int i = 0; i < num_states_; i++) {
+      int n1        = gen.rand();
+      int n2        = gen.rand();
+      int n3        = gen.rand();
+      int n4        = gen.rand();
+      h_state(i, 0) = (((static_cast<uint64_t>(n1)) & 0xffff) << 00) |
+                      (((static_cast<uint64_t>(n2)) & 0xffff) << 16) |
+                      (((static_cast<uint64_t>(n3)) & 0xffff) << 32) |
+                      (((static_cast<uint64_t>(n4)) & 0xffff) << 48);
+      h_lock(i, 0) = 0;
+    }
+    deep_copy(state_, h_state);
+    deep_copy(locks_, h_lock);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  Random_XorShift64<DeviceType> get_state() const {
+    const int i = Impl::Random_UniqueIndex<device_type>::get_state_idx(locks_);
+    return Random_XorShift64<DeviceType>(state_(i, 0), i);
+  }
+
+  // NOTE: state_idx MUST be unique and less than num_states
+  KOKKOS_INLINE_FUNCTION
+  Random_XorShift64<DeviceType> get_state(const int state_idx) const {
+    return Random_XorShift64<DeviceType>(state_(state_idx, 0), state_idx);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void free_state(const Random_XorShift64<DeviceType>& state) const {
+    state_(state.state_idx_, 0) = state.state_;
+    locks_(state.state_idx_, 0) = 0;
+  }
+};
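+
+// Example (illustrative sketch; the view `v` and the seed are assumptions):
+// the usual pattern for drawing random numbers inside a parallel kernel.
+//
+//   Kokkos::Random_XorShift64_Pool<> pool(12345);
+//   Kokkos::parallel_for(
+//       v.extent(0), KOKKOS_LAMBDA(const int i) {
+//         auto gen = pool.get_state();   // lock a per-thread generator
+//         v(i)     = gen.drand();        // uniform double in [0,1)
+//         pool.free_state(gen);          // write state back and unlock
+//       });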
+
+template <class DeviceType>
+class Random_XorShift1024 {
+  using execution_space = typename DeviceType::execution_space;
+
+ private:
+  int p_;
+  const int state_idx_;
+  Impl::Random_XorShift1024_State<
+      Impl::Random_XorShift1024_UseCArrayState<execution_space>::value>
+      state_;
+  friend class Random_XorShift1024_Pool<DeviceType>;
+
+ public:
+  using pool_type   = Random_XorShift1024_Pool<DeviceType>;
+  using device_type = DeviceType;
+
+  constexpr static uint32_t MAX_URAND   = std::numeric_limits<uint32_t>::max();
+  constexpr static uint64_t MAX_URAND64 = std::numeric_limits<uint64_t>::max();
+  constexpr static int32_t MAX_RAND     = std::numeric_limits<int32_t>::max();
+  constexpr static int64_t MAX_RAND64   = std::numeric_limits<int64_t>::max();
+
+  KOKKOS_INLINE_FUNCTION
+  Random_XorShift1024(const typename pool_type::state_data_type& state, int p,
+                      int state_idx = 0)
+      : p_(p), state_idx_(state_idx), state_(state, state_idx) {}
+
+  KOKKOS_INLINE_FUNCTION
+  uint32_t urand() {
+    uint64_t state_0 = state_[p_];
+    uint64_t state_1 = state_[p_ = (p_ + 1) & 15];
+    state_1 ^= state_1 << 31;
+    state_1 ^= state_1 >> 11;
+    state_0 ^= state_0 >> 30;
+    uint64_t tmp = (state_[p_] = state_0 ^ state_1) * 1181783497276652981ULL;
+    tmp          = tmp >> 16;
+    return static_cast<uint32_t>(tmp & MAX_URAND);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  uint64_t urand64() {
+    uint64_t state_0 = state_[p_];
+    uint64_t state_1 = state_[p_ = (p_ + 1) & 15];
+    state_1 ^= state_1 << 31;
+    state_1 ^= state_1 >> 11;
+    state_0 ^= state_0 >> 30;
+    return ((state_[p_] = state_0 ^ state_1) * 1181783497276652981LL) - 1;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  uint32_t urand(const uint32_t& range) {
+    const uint32_t max_val = (MAX_URAND / range) * range;
+    uint32_t tmp           = urand();
+    while (tmp >= max_val) tmp = urand();
+    return tmp % range;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  uint32_t urand(const uint32_t& start, const uint32_t& end) {
+    return urand(end - start) + start;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  uint64_t urand64(const uint64_t& range) {
+    const uint64_t max_val = (MAX_URAND64 / range) * range;
+    uint64_t tmp           = urand64();
+    while (tmp >= max_val) tmp = urand64();
+    return tmp % range;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  uint64_t urand64(const uint64_t& start, const uint64_t& end) {
+    return urand64(end - start) + start;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int rand() { return static_cast<int>(urand() / 2); }
+
+  KOKKOS_INLINE_FUNCTION
+  int rand(const int& range) {
+    const int max_val = (MAX_RAND / range) * range;
+    int tmp           = rand();
+    while (tmp >= max_val) tmp = rand();
+    return tmp % range;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int rand(const int& start, const int& end) {
+    return rand(end - start) + start;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int64_t rand64() { return static_cast<int64_t>(urand64() / 2); }
+
+  KOKKOS_INLINE_FUNCTION
+  int64_t rand64(const int64_t& range) {
+    const int64_t max_val = (MAX_RAND64 / range) * range;
+    int64_t tmp           = rand64();
+    while (tmp >= max_val) tmp = rand64();
+    return tmp % range;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int64_t rand64(const int64_t& start, const int64_t& end) {
+    return rand64(end - start) + start;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  float frand() { return urand64() / static_cast<float>(MAX_URAND64); }
+
+  KOKKOS_INLINE_FUNCTION
+  float frand(const float& range) {
+    return range * urand64() / static_cast<float>(MAX_URAND64);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  float frand(const float& start, const float& end) {
+    return frand(end - start) + start;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  double drand() { return urand64() / static_cast<double>(MAX_URAND64); }
+
+  KOKKOS_INLINE_FUNCTION
+  double drand(const double& range) {
+    return range * urand64() / static_cast<double>(MAX_URAND64);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  double drand(const double& start, const double& end) {
+    return drand(end - start) + start;
+  }
+
+  // Marsaglia polar method for drawing a standard normal distributed random
+  // number
+  KOKKOS_INLINE_FUNCTION
+  double normal() {
+    double S = 2.0;
+    double U;
+    while (S >= 1.0) {
+      U              = 2.0 * drand() - 1.0;
+      const double V = 2.0 * drand() - 1.0;
+      S              = U * U + V * V;
+    }
+    return U * std::sqrt(-2.0 * std::log(S) / S);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  double normal(const double& mean, const double& std_dev = 1.0) {
+    return mean + normal() * std_dev;
+  }
+};
+
+template <class DeviceType = Kokkos::DefaultExecutionSpace>
+class Random_XorShift1024_Pool {
+ public:
+  using device_type = typename DeviceType::device_type;
+
+ private:
+  using execution_space = typename device_type::execution_space;
+  using locks_type      = View<int**, device_type>;
+  using int_view_type   = View<int**, device_type>;
+  using state_data_type = View<uint64_t * [16], device_type>;
+
+  locks_type locks_;
+  state_data_type state_;
+  int_view_type p_;
+  int num_states_;
+  int padding_;
+  friend class Random_XorShift1024<DeviceType>;
+
+ public:
+  using generator_type = Random_XorShift1024<DeviceType>;
+
+  KOKKOS_INLINE_FUNCTION
+  Random_XorShift1024_Pool() { num_states_ = 0; padding_ = 0; }
+
+  inline Random_XorShift1024_Pool(uint64_t seed) {
+    num_states_ = 0;
+
+    init(seed, execution_space().concurrency());
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  Random_XorShift1024_Pool(const Random_XorShift1024_Pool& src)
+      : locks_(src.locks_),
+        state_(src.state_),
+        p_(src.p_),
+        num_states_(src.num_states_),
+        padding_(src.padding_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  Random_XorShift1024_Pool operator=(const Random_XorShift1024_Pool& src) {
+    locks_      = src.locks_;
+    state_      = src.state_;
+    p_          = src.p_;
+    num_states_ = src.num_states_;
+    padding_    = src.padding_;
+    return *this;
+  }
+
+  inline void init(uint64_t seed, int num_states) {
+    if (seed == 0) seed = uint64_t(1318319);
+    // Pad only on CPU-like architectures (fewer than 1000 threads). 64 is a
+    // magic number: just something not too large and not too small; 64
+    // sounded fine.
+    padding_    = num_states < 1000 ? 64 : 1;
+    num_states_ = num_states;
+    locks_ =
+        locks_type("Kokkos::Random_XorShift1024::locks", num_states_, padding_);
+    state_ = state_data_type("Kokkos::Random_XorShift1024::state", num_states_);
+    p_ = int_view_type("Kokkos::Random_XorShift1024::p", num_states_, padding_);
+
+    typename state_data_type::HostMirror h_state =
+        Kokkos::create_mirror_view(Kokkos::WithoutInitializing, state_);
+    typename locks_type::HostMirror h_lock =
+        Kokkos::create_mirror_view(Kokkos::WithoutInitializing, locks_);
+    typename int_view_type::HostMirror h_p =
+        Kokkos::create_mirror_view(Kokkos::WithoutInitializing, p_);
+
+    // Execute on the HostMirror's default execution space.
+    Random_XorShift64<typename state_data_type::HostMirror::execution_space>
+        gen(seed, 0);
+    for (int i = 0; i < 17; i++) gen.rand();
+    for (int i = 0; i < num_states_; i++) {
+      for (int j = 0; j < 16; j++) {
+        int n1        = gen.rand();
+        int n2        = gen.rand();
+        int n3        = gen.rand();
+        int n4        = gen.rand();
+        h_state(i, j) = (((static_cast<uint64_t>(n1)) & 0xffff) << 00) |
+                        (((static_cast<uint64_t>(n2)) & 0xffff) << 16) |
+                        (((static_cast<uint64_t>(n3)) & 0xffff) << 32) |
+                        (((static_cast<uint64_t>(n4)) & 0xffff) << 48);
+      }
+      h_p(i, 0)    = 0;
+      h_lock(i, 0) = 0;
+    }
+    deep_copy(state_, h_state);
+    deep_copy(locks_, h_lock);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  Random_XorShift1024<DeviceType> get_state() const {
+    const int i = Impl::Random_UniqueIndex<device_type>::get_state_idx(locks_);
+    return Random_XorShift1024<DeviceType>(state_, p_(i, 0), i);
+  }
+
+  // NOTE: state_idx MUST be unique and less than num_states
+  KOKKOS_INLINE_FUNCTION
+  Random_XorShift1024<DeviceType> get_state(const int state_idx) const {
+    return Random_XorShift1024<DeviceType>(state_, p_(state_idx, 0), state_idx);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void free_state(const Random_XorShift1024<DeviceType>& state) const {
+    for (int i = 0; i < 16; i++) state_(state.state_idx_, i) = state.state_[i];
+    p_(state.state_idx_, 0)     = state.p_;
+    locks_(state.state_idx_, 0) = 0;
+  }
+};
+
+namespace Impl {
+
+template <class ViewType, class RandomPool, int loops, int rank,
+          class IndexType>
+struct fill_random_functor_begin_end;
+
+template <class ViewType, class RandomPool, int loops, class IndexType>
+struct fill_random_functor_begin_end<ViewType, RandomPool, loops, 0,
+                                     IndexType> {
+  ViewType a;
+  RandomPool rand_pool;
+  typename ViewType::const_value_type begin, end;
+
+  using Rand = rand<typename RandomPool::generator_type,
+                    typename ViewType::non_const_value_type>;
+
+  fill_random_functor_begin_end(ViewType a_, RandomPool rand_pool_,
+                                typename ViewType::const_value_type begin_,
+                                typename ViewType::const_value_type end_)
+      : a(a_), rand_pool(rand_pool_), begin(begin_), end(end_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(IndexType) const {
+    typename RandomPool::generator_type gen = rand_pool.get_state();
+    a()                                     = Rand::draw(gen, begin, end);
+    rand_pool.free_state(gen);
+  }
+};
+
+template <class ViewType, class RandomPool, int loops, class IndexType>
+struct fill_random_functor_begin_end<ViewType, RandomPool, loops, 1,
+                                     IndexType> {
+  ViewType a;
+  RandomPool rand_pool;
+  typename ViewType::const_value_type begin, end;
+
+  using Rand = rand<typename RandomPool::generator_type,
+                    typename ViewType::non_const_value_type>;
+
+  fill_random_functor_begin_end(ViewType a_, RandomPool rand_pool_,
+                                typename ViewType::const_value_type begin_,
+                                typename ViewType::const_value_type end_)
+      : a(a_), rand_pool(rand_pool_), begin(begin_), end(end_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(IndexType i) const {
+    typename RandomPool::generator_type gen = rand_pool.get_state();
+    for (IndexType j = 0; j < loops; j++) {
+      const IndexType idx = i * loops + j;
+      if (idx < static_cast<IndexType>(a.extent(0)))
+        a(idx) = Rand::draw(gen, begin, end);
+    }
+    rand_pool.free_state(gen);
+  }
+};
+
+template <class ViewType, class RandomPool, int loops, class IndexType>
+struct fill_random_functor_begin_end<ViewType, RandomPool, loops, 2,
+                                     IndexType> {
+  ViewType a;
+  RandomPool rand_pool;
+  typename ViewType::const_value_type begin, end;
+
+  using Rand = rand<typename RandomPool::generator_type,
+                    typename ViewType::non_const_value_type>;
+
+  fill_random_functor_begin_end(ViewType a_, RandomPool rand_pool_,
+                                typename ViewType::const_value_type begin_,
+                                typename ViewType::const_value_type end_)
+      : a(a_), rand_pool(rand_pool_), begin(begin_), end(end_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(IndexType i) const {
+    typename RandomPool::generator_type gen = rand_pool.get_state();
+    for (IndexType j = 0; j < loops; j++) {
+      const IndexType idx = i * loops + j;
+      if (idx < static_cast<IndexType>(a.extent(0))) {
+        for (IndexType k = 0; k < static_cast<IndexType>(a.extent(1)); k++)
+          a(idx, k) = Rand::draw(gen, begin, end);
+      }
+    }
+    rand_pool.free_state(gen);
+  }
+};
+
+template <class ViewType, class RandomPool, int loops, class IndexType>
+struct fill_random_functor_begin_end<ViewType, RandomPool, loops, 3,
+                                     IndexType> {
+  ViewType a;
+  RandomPool rand_pool;
+  typename ViewType::const_value_type begin, end;
+
+  using Rand = rand<typename RandomPool::generator_type,
+                    typename ViewType::non_const_value_type>;
+
+  fill_random_functor_begin_end(ViewType a_, RandomPool rand_pool_,
+                                typename ViewType::const_value_type begin_,
+                                typename ViewType::const_value_type end_)
+      : a(a_), rand_pool(rand_pool_), begin(begin_), end(end_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(IndexType i) const {
+    typename RandomPool::generator_type gen = rand_pool.get_state();
+    for (IndexType j = 0; j < loops; j++) {
+      const IndexType idx = i * loops + j;
+      if (idx < static_cast<IndexType>(a.extent(0))) {
+        for (IndexType k = 0; k < static_cast<IndexType>(a.extent(1)); k++)
+          for (IndexType l = 0; l < static_cast<IndexType>(a.extent(2)); l++)
+            a(idx, k, l) = Rand::draw(gen, begin, end);
+      }
+    }
+    rand_pool.free_state(gen);
+  }
+};
+
+template <class ViewType, class RandomPool, int loops, class IndexType>
+struct fill_random_functor_begin_end<ViewType, RandomPool, loops, 4,
+                                     IndexType> {
+  ViewType a;
+  RandomPool rand_pool;
+  typename ViewType::const_value_type begin, end;
+
+  using Rand = rand<typename RandomPool::generator_type,
+                    typename ViewType::non_const_value_type>;
+
+  fill_random_functor_begin_end(ViewType a_, RandomPool rand_pool_,
+                                typename ViewType::const_value_type begin_,
+                                typename ViewType::const_value_type end_)
+      : a(a_), rand_pool(rand_pool_), begin(begin_), end(end_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(IndexType i) const {
+    typename RandomPool::generator_type gen = rand_pool.get_state();
+    for (IndexType j = 0; j < loops; j++) {
+      const IndexType idx = i * loops + j;
+      if (idx < static_cast<IndexType>(a.extent(0))) {
+        for (IndexType k = 0; k < static_cast<IndexType>(a.extent(1)); k++)
+          for (IndexType l = 0; l < static_cast<IndexType>(a.extent(2)); l++)
+            for (IndexType m = 0; m < static_cast<IndexType>(a.extent(3)); m++)
+              a(idx, k, l, m) = Rand::draw(gen, begin, end);
+      }
+    }
+    rand_pool.free_state(gen);
+  }
+};
+
+template <class ViewType, class RandomPool, int loops, class IndexType>
+struct fill_random_functor_begin_end<ViewType, RandomPool, loops, 5,
+                                     IndexType> {
+  ViewType a;
+  RandomPool rand_pool;
+  typename ViewType::const_value_type begin, end;
+
+  using Rand = rand<typename RandomPool::generator_type,
+                    typename ViewType::non_const_value_type>;
+
+  fill_random_functor_begin_end(ViewType a_, RandomPool rand_pool_,
+                                typename ViewType::const_value_type begin_,
+                                typename ViewType::const_value_type end_)
+      : a(a_), rand_pool(rand_pool_), begin(begin_), end(end_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(IndexType i) const {
+    typename RandomPool::generator_type gen = rand_pool.get_state();
+    for (IndexType j = 0; j < loops; j++) {
+      const IndexType idx = i * loops + j;
+      if (idx < static_cast<IndexType>(a.extent(0))) {
+        for (IndexType l = 0; l < static_cast<IndexType>(a.extent(1)); l++)
+          for (IndexType m = 0; m < static_cast<IndexType>(a.extent(2)); m++)
+            for (IndexType n = 0; n < static_cast<IndexType>(a.extent(3)); n++)
+              for (IndexType o = 0; o < static_cast<IndexType>(a.extent(4));
+                   o++)
+                a(idx, l, m, n, o) = Rand::draw(gen, begin, end);
+      }
+    }
+    rand_pool.free_state(gen);
+  }
+};
+
+template <class ViewType, class RandomPool, int loops, class IndexType>
+struct fill_random_functor_begin_end<ViewType, RandomPool, loops, 6,
+                                     IndexType> {
+  ViewType a;
+  RandomPool rand_pool;
+  typename ViewType::const_value_type begin, end;
+
+  using Rand = rand<typename RandomPool::generator_type,
+                    typename ViewType::non_const_value_type>;
+
+  fill_random_functor_begin_end(ViewType a_, RandomPool rand_pool_,
+                                typename ViewType::const_value_type begin_,
+                                typename ViewType::const_value_type end_)
+      : a(a_), rand_pool(rand_pool_), begin(begin_), end(end_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(IndexType i) const {
+    typename RandomPool::generator_type gen = rand_pool.get_state();
+    for (IndexType j = 0; j < loops; j++) {
+      const IndexType idx = i * loops + j;
+      if (idx < static_cast<IndexType>(a.extent(0))) {
+        for (IndexType k = 0; k < static_cast<IndexType>(a.extent(1)); k++)
+          for (IndexType l = 0; l < static_cast<IndexType>(a.extent(2)); l++)
+            for (IndexType m = 0; m < static_cast<IndexType>(a.extent(3)); m++)
+              for (IndexType n = 0; n < static_cast<IndexType>(a.extent(4));
+                   n++)
+                for (IndexType o = 0; o < static_cast<IndexType>(a.extent(5));
+                     o++)
+                  a(idx, k, l, m, n, o) = Rand::draw(gen, begin, end);
+      }
+    }
+    rand_pool.free_state(gen);
+  }
+};
+
+template <class ViewType, class RandomPool, int loops, class IndexType>
+struct fill_random_functor_begin_end<ViewType, RandomPool, loops, 7,
+                                     IndexType> {
+  ViewType a;
+  RandomPool rand_pool;
+  typename ViewType::const_value_type begin, end;
+
+  using Rand = rand<typename RandomPool::generator_type,
+                    typename ViewType::non_const_value_type>;
+
+  fill_random_functor_begin_end(ViewType a_, RandomPool rand_pool_,
+                                typename ViewType::const_value_type begin_,
+                                typename ViewType::const_value_type end_)
+      : a(a_), rand_pool(rand_pool_), begin(begin_), end(end_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(IndexType i) const {
+    typename RandomPool::generator_type gen = rand_pool.get_state();
+    for (IndexType j = 0; j < loops; j++) {
+      const IndexType idx = i * loops + j;
+      if (idx < static_cast<IndexType>(a.extent(0))) {
+        for (IndexType k = 0; k < static_cast<IndexType>(a.extent(1)); k++)
+          for (IndexType l = 0; l < static_cast<IndexType>(a.extent(2)); l++)
+            for (IndexType m = 0; m < static_cast<IndexType>(a.extent(3)); m++)
+              for (IndexType n = 0; n < static_cast<IndexType>(a.extent(4));
+                   n++)
+                for (IndexType o = 0; o < static_cast<IndexType>(a.extent(5));
+                     o++)
+                  for (IndexType p = 0; p < static_cast<IndexType>(a.extent(6));
+                       p++)
+                    a(idx, k, l, m, n, o, p) = Rand::draw(gen, begin, end);
+      }
+    }
+    rand_pool.free_state(gen);
+  }
+};
+
+template <class ViewType, class RandomPool, int loops, class IndexType>
+struct fill_random_functor_begin_end<ViewType, RandomPool, loops, 8,
+                                     IndexType> {
+  ViewType a;
+  RandomPool rand_pool;
+  typename ViewType::const_value_type begin, end;
+
+  using Rand = rand<typename RandomPool::generator_type,
+                    typename ViewType::non_const_value_type>;
+
+  fill_random_functor_begin_end(ViewType a_, RandomPool rand_pool_,
+                                typename ViewType::const_value_type begin_,
+                                typename ViewType::const_value_type end_)
+      : a(a_), rand_pool(rand_pool_), begin(begin_), end(end_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(IndexType i) const {
+    typename RandomPool::generator_type gen = rand_pool.get_state();
+    for (IndexType j = 0; j < loops; j++) {
+      const IndexType idx = i * loops + j;
+      if (idx < static_cast<IndexType>(a.extent(0))) {
+        for (IndexType k = 0; k < static_cast<IndexType>(a.extent(1)); k++)
+          for (IndexType l = 0; l < static_cast<IndexType>(a.extent(2)); l++)
+            for (IndexType m = 0; m < static_cast<IndexType>(a.extent(3)); m++)
+              for (IndexType n = 0; n < static_cast<IndexType>(a.extent(4));
+                   n++)
+                for (IndexType o = 0; o < static_cast<IndexType>(a.extent(5));
+                     o++)
+                  for (IndexType p = 0; p < static_cast<IndexType>(a.extent(6));
+                       p++)
+                    for (IndexType q = 0;
+                         q < static_cast<IndexType>(a.extent(7)); q++)
+                      a(idx, k, l, m, n, o, p, q) = Rand::draw(gen, begin, end);
+      }
+    }
+    rand_pool.free_state(gen);
+  }
+};
+
+template <class ExecutionSpace, class ViewType, class RandomPool,
+          class IndexType = int64_t>
+void fill_random(const ExecutionSpace& exec, ViewType a, RandomPool g,
+                 typename ViewType::const_value_type begin,
+                 typename ViewType::const_value_type end) {
+  int64_t LDA = a.extent(0);
+  if (LDA > 0)
+    parallel_for(
+        "Kokkos::fill_random",
+        Kokkos::RangePolicy<ExecutionSpace>(exec, 0, (LDA + 127) / 128),
+        Impl::fill_random_functor_begin_end<ViewType, RandomPool, 128,
+                                            ViewType::Rank, IndexType>(
+            a, g, begin, end));
+}
+
+}  // namespace Impl
+
+template <class ExecutionSpace, class ViewType, class RandomPool,
+          class IndexType = int64_t>
+void fill_random(const ExecutionSpace& exec, ViewType a, RandomPool g,
+                 typename ViewType::const_value_type begin,
+                 typename ViewType::const_value_type end) {
+  Impl::apply_to_view_of_static_rank(
+      [&](auto dst) { Kokkos::Impl::fill_random(exec, dst, g, begin, end); },
+      a);
+}
+
+template <class ExecutionSpace, class ViewType, class RandomPool,
+          class IndexType = int64_t>
+void fill_random(const ExecutionSpace& exec, ViewType a, RandomPool g,
+                 typename ViewType::const_value_type range) {
+  fill_random(exec, a, g, 0, range);
+}
+
+template <class ViewType, class RandomPool, class IndexType = int64_t>
+void fill_random(ViewType a, RandomPool g,
+                 typename ViewType::const_value_type begin,
+                 typename ViewType::const_value_type end) {
+  fill_random(typename ViewType::execution_space{}, a, g, begin, end);
+}
+
+template <class ViewType, class RandomPool, class IndexType = int64_t>
+void fill_random(ViewType a, RandomPool g,
+                 typename ViewType::const_value_type range) {
+  fill_random(typename ViewType::execution_space{}, a, g, 0, range);
+}
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_RANDOM
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_RANDOM
+#endif
+#endif
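
The fill_random overloads closing out this header are the public entry points; they dispatch to the rank-specialized functors above through Impl::apply_to_view_of_static_rank. A minimal usage sketch, assuming the Random_XorShift64_Pool generator pool defined earlier in this same header:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_Random.hpp>

    int main(int argc, char* argv[]) {
      Kokkos::initialize(argc, argv);
      {
        Kokkos::View<double**> a("a", 1000, 3);
        // Pool of per-thread generators, seeded deterministically.
        Kokkos::Random_XorShift64_Pool<> pool(12345);
        // Uniform values between -1 and 1 via the begin/end overload ...
        Kokkos::fill_random(a, pool, -1.0, 1.0);
        // ... or between 0 and 10 via the single-range overload.
        Kokkos::fill_random(a, pool, 10.0);
        Kokkos::fence();
      }
      Kokkos::finalize();
    }
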
diff --git a/bundled/kokkos-3.7.00/algorithms/src/Kokkos_Sort.hpp b/bundled/kokkos-3.7.00/algorithms/src/Kokkos_Sort.hpp
new file mode 100644 (file)
index 0000000..ad0c2d4
--- /dev/null
@@ -0,0 +1,696 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SORT_HPP_
+#define KOKKOS_SORT_HPP_
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_SORT
+#endif
+
+#include <Kokkos_Core.hpp>
+
+#include <algorithm>
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <class DstViewType, class SrcViewType, int Rank = DstViewType::Rank>
+struct CopyOp;
+
+template <class DstViewType, class SrcViewType>
+struct CopyOp<DstViewType, SrcViewType, 1> {
+  KOKKOS_INLINE_FUNCTION
+  static void copy(DstViewType const& dst, size_t i_dst, SrcViewType const& src,
+                   size_t i_src) {
+    dst(i_dst) = src(i_src);
+  }
+};
+
+template <class DstViewType, class SrcViewType>
+struct CopyOp<DstViewType, SrcViewType, 2> {
+  KOKKOS_INLINE_FUNCTION
+  static void copy(DstViewType const& dst, size_t i_dst, SrcViewType const& src,
+                   size_t i_src) {
+    for (int j = 0; j < (int)dst.extent(1); j++) dst(i_dst, j) = src(i_src, j);
+  }
+};
+
+template <class DstViewType, class SrcViewType>
+struct CopyOp<DstViewType, SrcViewType, 3> {
+  KOKKOS_INLINE_FUNCTION
+  static void copy(DstViewType const& dst, size_t i_dst, SrcViewType const& src,
+                   size_t i_src) {
+    for (int j = 0; j < (int)dst.extent(1); j++)
+      for (int k = 0; k < (int)dst.extent(2); k++)
+        dst(i_dst, j, k) = src(i_src, j, k);
+  }
+};
+}  // namespace Impl
+
+//----------------------------------------------------------------------------
+
+template <class KeyViewType, class BinSortOp,
+          class Space    = typename KeyViewType::device_type,
+          class SizeType = typename KeyViewType::memory_space::size_type>
+class BinSort {
+ public:
+  template <class DstViewType, class SrcViewType>
+  struct copy_functor {
+    using src_view_type = typename SrcViewType::const_type;
+
+    using copy_op = Impl::CopyOp<DstViewType, src_view_type>;
+
+    DstViewType dst_values;
+    src_view_type src_values;
+    int dst_offset;
+
+    copy_functor(DstViewType const& dst_values_, int const& dst_offset_,
+                 SrcViewType const& src_values_)
+        : dst_values(dst_values_),
+          src_values(src_values_),
+          dst_offset(dst_offset_) {}
+
+    KOKKOS_INLINE_FUNCTION
+    void operator()(const int& i) const {
+      copy_op::copy(dst_values, i + dst_offset, src_values, i);
+    }
+  };
+
+  template <class DstViewType, class PermuteViewType, class SrcViewType>
+  struct copy_permute_functor {
+    // If SrcViewType is a Kokkos::View, we can request constant, random-access
+    // memory traits; otherwise we can only fall back to its plain const type.
+
+    using src_view_type = std::conditional_t<
+        Kokkos::is_view<SrcViewType>::value,
+        Kokkos::View<typename SrcViewType::const_data_type,
+                     typename SrcViewType::array_layout,
+                     typename SrcViewType::device_type,
+                     Kokkos::MemoryTraits<Kokkos::RandomAccess> >,
+        typename SrcViewType::const_type>;
+
+    using perm_view_type = typename PermuteViewType::const_type;
+
+    using copy_op = Impl::CopyOp<DstViewType, src_view_type>;
+
+    DstViewType dst_values;
+    perm_view_type sort_order;
+    src_view_type src_values;
+    int src_offset;
+
+    copy_permute_functor(DstViewType const& dst_values_,
+                         PermuteViewType const& sort_order_,
+                         SrcViewType const& src_values_, int const& src_offset_)
+        : dst_values(dst_values_),
+          sort_order(sort_order_),
+          src_values(src_values_),
+          src_offset(src_offset_) {}
+
+    KOKKOS_INLINE_FUNCTION
+    void operator()(const int& i) const {
+      copy_op::copy(dst_values, i, src_values, src_offset + sort_order(i));
+    }
+  };
+
+  // Naming this alias "execution_space" would be problematic since it would
+  // be taken as the execution space for the various functors, which might in
+  // fact use another execution space passed to sort() or
+  // create_permute_vector().
+  using exec_space  = typename Space::execution_space;
+  using bin_op_type = BinSortOp;
+
+  struct bin_count_tag {};
+  struct bin_offset_tag {};
+  struct bin_binning_tag {};
+  struct bin_sort_bins_tag {};
+
+ public:
+  using size_type  = SizeType;
+  using value_type = size_type;
+
+  using offset_type    = Kokkos::View<size_type*, Space>;
+  using bin_count_type = Kokkos::View<const int*, Space>;
+
+  using const_key_view_type = typename KeyViewType::const_type;
+
+  // If KeyViewType is a Kokkos::View, we can request constant, random-access
+  // memory traits; otherwise we can only fall back to its plain const type.
+
+  using const_rnd_key_view_type = std::conditional_t<
+      Kokkos::is_view<KeyViewType>::value,
+      Kokkos::View<typename KeyViewType::const_data_type,
+                   typename KeyViewType::array_layout,
+                   typename KeyViewType::device_type,
+                   Kokkos::MemoryTraits<Kokkos::RandomAccess> >,
+      const_key_view_type>;
+
+  using non_const_key_scalar = typename KeyViewType::non_const_value_type;
+  using const_key_scalar     = typename KeyViewType::const_value_type;
+
+  using bin_count_atomic_type =
+      Kokkos::View<int*, Space, Kokkos::MemoryTraits<Kokkos::Atomic> >;
+
+ private:
+  const_key_view_type keys;
+  const_rnd_key_view_type keys_rnd;
+
+ public:
+  BinSortOp bin_op;
+  offset_type bin_offsets;
+  bin_count_atomic_type bin_count_atomic;
+  bin_count_type bin_count_const;
+  offset_type sort_order;
+
+  int range_begin;
+  int range_end;
+  bool sort_within_bins;
+
+ public:
+  BinSort() = default;
+
+  //----------------------------------------
+  // Constructor: takes the keys, the binning operator, and optionally whether
+  // to sort within bins (default: false).
+  template <typename ExecutionSpace>
+  BinSort(const ExecutionSpace& exec, const_key_view_type keys_,
+          int range_begin_, int range_end_, BinSortOp bin_op_,
+          bool sort_within_bins_ = false)
+      : keys(keys_),
+        keys_rnd(keys_),
+        bin_op(bin_op_),
+        bin_offsets(),
+        bin_count_atomic(),
+        bin_count_const(),
+        sort_order(),
+        range_begin(range_begin_),
+        range_end(range_end_),
+        sort_within_bins(sort_within_bins_) {
+    static_assert(
+        Kokkos::SpaceAccessibility<ExecutionSpace,
+                                   typename Space::memory_space>::accessible,
+        "The provided execution space must be able to access the memory space "
+        "BinSort was initialized with!");
+    if (bin_op.max_bins() <= 0)
+      Kokkos::abort(
+          "The number of bins in the BinSortOp object must be greater than 0!");
+    bin_count_atomic = Kokkos::View<int*, Space>(
+        "Kokkos::SortImpl::BinSortFunctor::bin_count", bin_op.max_bins());
+    bin_count_const = bin_count_atomic;
+    bin_offsets =
+        offset_type(view_alloc(exec, WithoutInitializing,
+                               "Kokkos::SortImpl::BinSortFunctor::bin_offsets"),
+                    bin_op.max_bins());
+    sort_order =
+        offset_type(view_alloc(exec, WithoutInitializing,
+                               "Kokkos::SortImpl::BinSortFunctor::sort_order"),
+                    range_end - range_begin);
+  }
+
+  BinSort(const_key_view_type keys_, int range_begin_, int range_end_,
+          BinSortOp bin_op_, bool sort_within_bins_ = false)
+      : BinSort(exec_space{}, keys_, range_begin_, range_end_, bin_op_,
+                sort_within_bins_) {}
+
+  template <typename ExecutionSpace>
+  BinSort(const ExecutionSpace& exec, const_key_view_type keys_,
+          BinSortOp bin_op_, bool sort_within_bins_ = false)
+      : BinSort(exec, keys_, 0, keys_.extent(0), bin_op_, sort_within_bins_) {}
+
+  BinSort(const_key_view_type keys_, BinSortOp bin_op_,
+          bool sort_within_bins_ = false)
+      : BinSort(exec_space{}, keys_, bin_op_, sort_within_bins_) {}
+
+  //----------------------------------------
+  // Create the permutation vector, the bin_offset array, and the bin_count
+  // array. This can be called again if the keys have changed.
+  template <class ExecutionSpace = exec_space>
+  void create_permute_vector(const ExecutionSpace& exec = exec_space{}) {
+    static_assert(
+        Kokkos::SpaceAccessibility<ExecutionSpace,
+                                   typename Space::memory_space>::accessible,
+        "The provided execution space must be able to access the memory space "
+        "BinSort was initialized with!");
+
+    const size_t len = range_end - range_begin;
+    Kokkos::parallel_for(
+        "Kokkos::Sort::BinCount",
+        Kokkos::RangePolicy<ExecutionSpace, bin_count_tag>(exec, 0, len),
+        *this);
+    Kokkos::parallel_scan("Kokkos::Sort::BinOffset",
+                          Kokkos::RangePolicy<ExecutionSpace, bin_offset_tag>(
+                              exec, 0, bin_op.max_bins()),
+                          *this);
+
+    Kokkos::deep_copy(exec, bin_count_atomic, 0);
+    Kokkos::parallel_for(
+        "Kokkos::Sort::BinBinning",
+        Kokkos::RangePolicy<ExecutionSpace, bin_binning_tag>(exec, 0, len),
+        *this);
+
+    if (sort_within_bins)
+      Kokkos::parallel_for(
+          "Kokkos::Sort::BinSort",
+          Kokkos::RangePolicy<ExecutionSpace, bin_sort_bins_tag>(
+              exec, 0, bin_op.max_bins()),
+          *this);
+  }
+
+  // Sort a subset of a view with respect to the first dimension using the
+  // permutation array
+  template <class ExecutionSpace, class ValuesViewType>
+  void sort(const ExecutionSpace& exec, ValuesViewType const& values,
+            int values_range_begin, int values_range_end) const {
+    static_assert(
+        Kokkos::SpaceAccessibility<ExecutionSpace,
+                                   typename Space::memory_space>::accessible,
+        "The provided execution space must be able to access the memory space "
+        "BinSort was initialized with!");
+    static_assert(
+        Kokkos::SpaceAccessibility<
+            ExecutionSpace, typename ValuesViewType::memory_space>::accessible,
+        "The provided execution space must be able to access the memory space "
+        "of the View argument!");
+
+    using scratch_view_type =
+        Kokkos::View<typename ValuesViewType::data_type,
+                     typename ValuesViewType::array_layout,
+                     typename ValuesViewType::device_type>;
+
+    const size_t len        = range_end - range_begin;
+    const size_t values_len = values_range_end - values_range_begin;
+    if (len != values_len) {
+      Kokkos::abort(
+          "BinSort::sort: values range length != permutation vector length");
+    }
+
+    scratch_view_type sorted_values(
+        view_alloc(exec, WithoutInitializing,
+                   "Kokkos::SortImpl::BinSortFunctor::sorted_values"),
+        values.rank_dynamic > 0 ? len : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        values.rank_dynamic > 1 ? values.extent(1)
+                                : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        values.rank_dynamic > 2 ? values.extent(2)
+                                : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        values.rank_dynamic > 3 ? values.extent(3)
+                                : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        values.rank_dynamic > 4 ? values.extent(4)
+                                : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        values.rank_dynamic > 5 ? values.extent(5)
+                                : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        values.rank_dynamic > 6 ? values.extent(6)
+                                : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        values.rank_dynamic > 7 ? values.extent(7)
+                                : KOKKOS_IMPL_CTOR_DEFAULT_ARG);
+
+    {
+      copy_permute_functor<scratch_view_type /* DstViewType */,
+                           offset_type /* PermuteViewType */,
+                           ValuesViewType /* SrcViewType */>
+          functor(sorted_values, sort_order, values,
+                  values_range_begin - range_begin);
+
+      parallel_for("Kokkos::Sort::CopyPermute",
+                   Kokkos::RangePolicy<ExecutionSpace>(exec, 0, len), functor);
+    }
+
+    {
+      copy_functor<ValuesViewType, scratch_view_type> functor(
+          values, range_begin, sorted_values);
+
+      parallel_for("Kokkos::Sort::Copy",
+                   Kokkos::RangePolicy<ExecutionSpace>(exec, 0, len), functor);
+    }
+  }
+
+  // Sort a subset of a view with respect to the first dimension using the
+  // permutation array
+  template <class ValuesViewType>
+  void sort(ValuesViewType const& values, int values_range_begin,
+            int values_range_end) const {
+    exec_space exec;
+    sort(exec, values, values_range_begin, values_range_end);
+    exec.fence("Kokkos::Sort: fence after sorting");
+  }
+
+  template <class ExecutionSpace, class ValuesViewType>
+  void sort(ExecutionSpace const& exec, ValuesViewType const& values) const {
+    this->sort(exec, values, 0, /*values.extent(0)*/ range_end - range_begin);
+  }
+
+  template <class ValuesViewType>
+  void sort(ValuesViewType const& values) const {
+    this->sort(values, 0, /*values.extent(0)*/ range_end - range_begin);
+  }
+
+  // Get the permutation vector
+  KOKKOS_INLINE_FUNCTION
+  offset_type get_permute_vector() const { return sort_order; }
+
+  // Get the start offsets for each bin
+  KOKKOS_INLINE_FUNCTION
+  offset_type get_bin_offsets() const { return bin_offsets; }
+
+  // Get the count for each bin
+  KOKKOS_INLINE_FUNCTION
+  bin_count_type get_bin_count() const { return bin_count_const; }
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const bin_count_tag& /*tag*/, const int i) const {
+    const int j = range_begin + i;
+    bin_count_atomic(bin_op.bin(keys, j))++;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const bin_offset_tag& /*tag*/, const int i,
+                  value_type& offset, const bool& final) const {
+    if (final) {
+      bin_offsets(i) = offset;
+    }
+    offset += bin_count_const(i);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const bin_binning_tag& /*tag*/, const int i) const {
+    const int j     = range_begin + i;
+    const int bin   = bin_op.bin(keys, j);
+    const int count = bin_count_atomic(bin)++;
+
+    sort_order(bin_offsets(bin) + count) = j;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const bin_sort_bins_tag& /*tag*/, const int i) const {
+    auto bin_size = bin_count_const(i);
+    if (bin_size <= 1) return;
+    int upper_bound = bin_offsets(i) + bin_size;
+    bool sorted     = false;
+    while (!sorted) {
+      sorted      = true;
+      int old_idx = sort_order(bin_offsets(i));
+      int new_idx = 0;
+      for (int k = bin_offsets(i) + 1; k < upper_bound; k++) {
+        new_idx = sort_order(k);
+
+        if (!bin_op(keys_rnd, old_idx, new_idx)) {
+          sort_order(k - 1) = new_idx;
+          sort_order(k)     = old_idx;
+          sorted            = false;
+        } else {
+          old_idx = new_idx;
+        }
+      }
+      upper_bound--;
+    }
+  }
+};
+
+//----------------------------------------------------------------------------
+
+template <class KeyViewType>
+struct BinOp1D {
+  int max_bins_ = {};
+  double mul_   = {};
+  double min_   = {};
+
+  BinOp1D() = default;
+
+  // Construct BinOp with number of bins, minimum value and maximum value
+  BinOp1D(int max_bins__, typename KeyViewType::const_value_type min,
+          typename KeyViewType::const_value_type max)
+      : max_bins_(max_bins__ + 1),
+        // Cast to double to avoid possible overflow when using integer
+        mul_(static_cast<double>(max_bins__) /
+             (static_cast<double>(max) - static_cast<double>(min))),
+        min_(static_cast<double>(min)) {
+    // For integral types the number of bins may be larger than the range,
+    // in which case we can have exactly one unique value per bin
+    // and then do not need to sort within bins.
+    if (std::is_integral<typename KeyViewType::const_value_type>::value &&
+        (static_cast<double>(max) - static_cast<double>(min)) <=
+            static_cast<double>(max_bins__)) {
+      mul_ = 1.;
+    }
+  }
+
+  // Determine bin index from key value
+  template <class ViewType>
+  KOKKOS_INLINE_FUNCTION int bin(ViewType& keys, const int& i) const {
+    return static_cast<int>(mul_ * (static_cast<double>(keys(i)) - min_));
+  }
+
+  // Return maximum bin index + 1
+  KOKKOS_INLINE_FUNCTION
+  int max_bins() const { return max_bins_; }
+
+  // Compare two keys within a bin: returns true if the key at i1 should be
+  // ordered before the key at i2.
+  template <class ViewType, typename iType1, typename iType2>
+  KOKKOS_INLINE_FUNCTION bool operator()(ViewType& keys, iType1& i1,
+                                         iType2& i2) const {
+    return keys(i1) < keys(i2);
+  }
+};
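+// For example, BinOp1D<KeyViewType> op(100, 0.0, 1.0) gives mul_ == 100,
+// so a key of 0.42 falls into bin static_cast<int>(100 * 0.42) == 42, and
+// max_bins() returns 101 so that a key equal to the maximum still lands in
+// a valid bin.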
+
+template <class KeyViewType>
+struct BinOp3D {
+  int max_bins_[3] = {};
+  double mul_[3]   = {};
+  double min_[3]   = {};
+
+  BinOp3D() = default;
+
+  BinOp3D(int max_bins__[], typename KeyViewType::const_value_type min[],
+          typename KeyViewType::const_value_type max[]) {
+    max_bins_[0] = max_bins__[0];
+    max_bins_[1] = max_bins__[1];
+    max_bins_[2] = max_bins__[2];
+    mul_[0]      = static_cast<double>(max_bins__[0]) /
+              (static_cast<double>(max[0]) - static_cast<double>(min[0]));
+    mul_[1] = static_cast<double>(max_bins__[1]) /
+              (static_cast<double>(max[1]) - static_cast<double>(min[1]));
+    mul_[2] = static_cast<double>(max_bins__[2]) /
+              (static_cast<double>(max[2]) - static_cast<double>(min[2]));
+    min_[0] = static_cast<double>(min[0]);
+    min_[1] = static_cast<double>(min[1]);
+    min_[2] = static_cast<double>(min[2]);
+  }
+
+  template <class ViewType>
+  KOKKOS_INLINE_FUNCTION int bin(ViewType& keys, const int& i) const {
+    return int((((int(mul_[0] * (keys(i, 0) - min_[0])) * max_bins_[1]) +
+                 int(mul_[1] * (keys(i, 1) - min_[1]))) *
+                max_bins_[2]) +
+               int(mul_[2] * (keys(i, 2) - min_[2])));
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int max_bins() const { return max_bins_[0] * max_bins_[1] * max_bins_[2]; }
+
+  template <class ViewType, typename iType1, typename iType2>
+  KOKKOS_INLINE_FUNCTION bool operator()(ViewType& keys, iType1& i1,
+                                         iType2& i2) const {
+    if (keys(i1, 0) > keys(i2, 0))
+      return true;
+    else if (keys(i1, 0) == keys(i2, 0)) {
+      if (keys(i1, 1) > keys(i2, 1))
+        return true;
+      else if (keys(i1, 1) == keys(i2, 1)) {
+        if (keys(i1, 2) > keys(i2, 2)) return true;
+      }
+    }
+    return false;
+  }
+};
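+// For example, BinOp3D expects rank-2 keys indexed as keys(i, 0..2); with
+// max_bins {10, 10, 10} over [0, 1] in each coordinate, the point
+// (0.15, 0.25, 0.35) falls into bin (1 * 10 + 2) * 10 + 3 == 123.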
+
+namespace Impl {
+
+template <class ViewType, class ExecutionSpace>
+bool try_std_sort(ViewType view, const ExecutionSpace& exec) {
+  bool possible    = true;
+  size_t stride[8] = {view.stride_0(), view.stride_1(), view.stride_2(),
+                      view.stride_3(), view.stride_4(), view.stride_5(),
+                      view.stride_6(), view.stride_7()};
+  possible         = possible &&
+             SpaceAccessibility<HostSpace,
+                                typename ViewType::memory_space>::accessible;
+  possible = possible && (ViewType::Rank == 1);
+  possible = possible && (stride[0] == 1);
+  if (possible) {
+    exec.fence("Kokkos::sort: Fence before sorting on the host");
+    std::sort(view.data(), view.data() + view.extent(0));
+  }
+  return possible;
+}
+
+template <class ViewType>
+struct min_max_functor {
+  using minmax_scalar =
+      Kokkos::MinMaxScalar<typename ViewType::non_const_value_type>;
+
+  ViewType view;
+  min_max_functor(const ViewType& view_) : view(view_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const size_t& i, minmax_scalar& minmax) const {
+    if (view(i) < minmax.min_val) minmax.min_val = view(i);
+    if (view(i) > minmax.max_val) minmax.max_val = view(i);
+  }
+};
+
+}  // namespace Impl
+
+template <class ExecutionSpace, class ViewType>
+std::enable_if_t<Kokkos::is_execution_space<ExecutionSpace>::value> sort(
+    const ExecutionSpace& exec, ViewType const& view) {
+  using CompType = BinOp1D<ViewType>;
+
+  Kokkos::MinMaxScalar<typename ViewType::non_const_value_type> result;
+  Kokkos::MinMax<typename ViewType::non_const_value_type> reducer(result);
+  parallel_reduce("Kokkos::Sort::FindExtent",
+                  Kokkos::RangePolicy<typename ViewType::execution_space>(
+                      exec, 0, view.extent(0)),
+                  Impl::min_max_functor<ViewType>(view), reducer);
+  if (result.min_val == result.max_val) return;
+  // For integral types the number of bins may be larger than the range,
+  // in which case we can have exactly one unique value per bin
+  // and then do not need to sort within bins.
+  bool sort_in_bins = true;
+  // TODO: figure out a better max_bins than this ...
+  int64_t max_bins = view.extent(0) / 2;
+  if (std::is_integral<typename ViewType::non_const_value_type>::value) {
+    // Cast to double to avoid possible overflow when using integer
+    auto const max_val = static_cast<double>(result.max_val);
+    auto const min_val = static_cast<double>(result.min_val);
+    // using 10M as the cutoff for special behavior (roughly 40MB for the count
+    // array)
+    if ((max_val - min_val) < 10000000) {
+      max_bins     = max_val - min_val + 1;
+      sort_in_bins = false;
+    }
+  }
+  if (std::is_floating_point<typename ViewType::non_const_value_type>::value) {
+    KOKKOS_ASSERT(std::isfinite(static_cast<double>(result.max_val) -
+                                static_cast<double>(result.min_val)));
+  }
+
+  BinSort<ViewType, CompType> bin_sort(
+      view, CompType(max_bins, result.min_val, result.max_val), sort_in_bins);
+  bin_sort.create_permute_vector(exec);
+  bin_sort.sort(exec, view);
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+template <class ExecutionSpace, class ViewType>
+KOKKOS_DEPRECATED_WITH_COMMENT(
+    "Use the overload not taking bool always_use_kokkos_sort")
+std::enable_if_t<Kokkos::is_execution_space<ExecutionSpace>::value> sort(
+    const ExecutionSpace& exec, ViewType const& view,
+    bool const always_use_kokkos_sort) {
+  if (!always_use_kokkos_sort && Impl::try_std_sort(view, exec)) {
+    return;
+  } else {
+    sort(exec, view);
+  }
+}
+#endif
+
+template <class ViewType>
+void sort(ViewType const& view) {
+  typename ViewType::execution_space exec;
+  sort(exec, view);
+  exec.fence("Kokkos::Sort: fence after sorting");
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+template <class ViewType>
+KOKKOS_DEPRECATED_WITH_COMMENT(
+    "Use the overload not taking bool always_use_kokkos_sort")
+void sort(ViewType const& view, bool const always_use_kokkos_sort) {
+  typename ViewType::execution_space exec;
+  sort(exec, view, always_use_kokkos_sort);
+  exec.fence("Kokkos::Sort: fence after sorting");
+}
+#endif
+
+template <class ExecutionSpace, class ViewType>
+std::enable_if_t<Kokkos::is_execution_space<ExecutionSpace>::value> sort(
+    const ExecutionSpace& exec, ViewType view, size_t const begin,
+    size_t const end) {
+  using range_policy = Kokkos::RangePolicy<typename ViewType::execution_space>;
+  using CompType     = BinOp1D<ViewType>;
+
+  Kokkos::MinMaxScalar<typename ViewType::non_const_value_type> result;
+  Kokkos::MinMax<typename ViewType::non_const_value_type> reducer(result);
+
+  parallel_reduce("Kokkos::Sort::FindExtent", range_policy(exec, begin, end),
+                  Impl::min_max_functor<ViewType>(view), reducer);
+
+  if (result.min_val == result.max_val) return;
+
+  BinSort<ViewType, CompType> bin_sort(
+      exec, view, begin, end,
+      CompType((end - begin) / 2, result.min_val, result.max_val), true);
+
+  bin_sort.create_permute_vector(exec);
+  bin_sort.sort(exec, view, begin, end);
+}
+
+template <class ViewType>
+void sort(ViewType view, size_t const begin, size_t const end) {
+  typename ViewType::execution_space exec;
+  sort(exec, view, begin, end);
+  exec.fence("Kokkos::Sort: fence after sorting");
+}
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_SORT
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_SORT
+#endif
+#endif
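
Taken together, the sort() overloads above find the key range with Impl::min_max_functor and then delegate to BinSort with a BinOp1D binning operator. A minimal sketch of both the convenience path and the explicit BinSort path (the key range handed to BinOp1D is an assumption here, standing in for known data bounds):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_Sort.hpp>

    void sort_example() {
      Kokkos::View<int*> keys("keys", 1 << 20);
      // ... fill keys with values in [0, 100] ...

      // Convenience path: bins, permutes, and fences internally.
      Kokkos::sort(keys);

      // Explicit path: build the permutation once, reuse it for other views.
      Kokkos::View<double*> values("values", keys.extent(0));
      using KeyView = decltype(keys);
      using BinOp   = Kokkos::BinOp1D<KeyView>;
      Kokkos::BinSort<KeyView, BinOp> bin_sort(keys, BinOp(64, 0, 100),
                                               /*sort_within_bins_=*/true);
      bin_sort.create_permute_vector();
      bin_sort.sort(keys);    // reorder the keys themselves
      bin_sort.sort(values);  // reorder values with the same permutation
    }
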
diff --git a/bundled/kokkos-3.7.00/algorithms/src/Kokkos_StdAlgorithms.hpp b/bundled/kokkos-3.7.00/algorithms/src/Kokkos_StdAlgorithms.hpp
new file mode 100644 (file)
index 0000000..3e0f731
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_HPP
+#define KOKKOS_STD_ALGORITHMS_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_STD_ALGORITHMS
+#endif
+
+/// \file Kokkos_StdAlgorithms.hpp
+/// \brief Kokkos counterparts for Standard C++ Library algorithms
+
+#include "std_algorithms/impl/Kokkos_Constraints.hpp"
+#include "std_algorithms/impl/Kokkos_RandomAccessIterator.hpp"
+#include "std_algorithms/Kokkos_BeginEnd.hpp"
+
+// distance
+#include "std_algorithms/Kokkos_Distance.hpp"
+
+// Note that we categorize the headers below
+// following the std classification.
+
+// modifying ops
+#include "std_algorithms/Kokkos_Swap.hpp"
+#include "std_algorithms/Kokkos_IterSwap.hpp"
+
+// non-modifying sequence
+#include "std_algorithms/Kokkos_AdjacentFind.hpp"
+#include "std_algorithms/Kokkos_Count.hpp"
+#include "std_algorithms/Kokkos_CountIf.hpp"
+#include "std_algorithms/Kokkos_AllOf.hpp"
+#include "std_algorithms/Kokkos_AnyOf.hpp"
+#include "std_algorithms/Kokkos_NoneOf.hpp"
+#include "std_algorithms/Kokkos_Equal.hpp"
+#include "std_algorithms/Kokkos_Find.hpp"
+#include "std_algorithms/Kokkos_FindIf.hpp"
+#include "std_algorithms/Kokkos_FindIfNot.hpp"
+#include "std_algorithms/Kokkos_FindEnd.hpp"
+#include "std_algorithms/Kokkos_FindFirstOf.hpp"
+#include "std_algorithms/Kokkos_ForEach.hpp"
+#include "std_algorithms/Kokkos_ForEachN.hpp"
+#include "std_algorithms/Kokkos_LexicographicalCompare.hpp"
+#include "std_algorithms/Kokkos_Mismatch.hpp"
+#include "std_algorithms/Kokkos_Search.hpp"
+#include "std_algorithms/Kokkos_SearchN.hpp"
+
+// modifying sequence
+#include "std_algorithms/Kokkos_Fill.hpp"
+#include "std_algorithms/Kokkos_FillN.hpp"
+#include "std_algorithms/Kokkos_Replace.hpp"
+#include "std_algorithms/Kokkos_ReplaceIf.hpp"
+#include "std_algorithms/Kokkos_ReplaceCopyIf.hpp"
+#include "std_algorithms/Kokkos_ReplaceCopy.hpp"
+#include "std_algorithms/Kokkos_Copy.hpp"
+#include "std_algorithms/Kokkos_CopyN.hpp"
+#include "std_algorithms/Kokkos_CopyBackward.hpp"
+#include "std_algorithms/Kokkos_CopyIf.hpp"
+#include "std_algorithms/Kokkos_Transform.hpp"
+#include "std_algorithms/Kokkos_Generate.hpp"
+#include "std_algorithms/Kokkos_GenerateN.hpp"
+#include "std_algorithms/Kokkos_Reverse.hpp"
+#include "std_algorithms/Kokkos_ReverseCopy.hpp"
+#include "std_algorithms/Kokkos_Move.hpp"
+#include "std_algorithms/Kokkos_MoveBackward.hpp"
+#include "std_algorithms/Kokkos_SwapRanges.hpp"
+#include "std_algorithms/Kokkos_Unique.hpp"
+#include "std_algorithms/Kokkos_UniqueCopy.hpp"
+#include "std_algorithms/Kokkos_Rotate.hpp"
+#include "std_algorithms/Kokkos_RotateCopy.hpp"
+#include "std_algorithms/Kokkos_Remove.hpp"
+#include "std_algorithms/Kokkos_RemoveIf.hpp"
+#include "std_algorithms/Kokkos_RemoveCopy.hpp"
+#include "std_algorithms/Kokkos_RemoveCopyIf.hpp"
+#include "std_algorithms/Kokkos_ShiftLeft.hpp"
+#include "std_algorithms/Kokkos_ShiftRight.hpp"
+
+// sorting
+#include "std_algorithms/Kokkos_IsSortedUntil.hpp"
+#include "std_algorithms/Kokkos_IsSorted.hpp"
+
+// min/max element
+#include "std_algorithms/Kokkos_MinElement.hpp"
+#include "std_algorithms/Kokkos_MaxElement.hpp"
+#include "std_algorithms/Kokkos_MinMaxElement.hpp"
+
+// partitioning
+#include "std_algorithms/Kokkos_IsPartitioned.hpp"
+#include "std_algorithms/Kokkos_PartitionCopy.hpp"
+#include "std_algorithms/Kokkos_PartitionPoint.hpp"
+
+// numeric
+#include "std_algorithms/Kokkos_AdjacentDifference.hpp"
+#include "std_algorithms/Kokkos_Reduce.hpp"
+#include "std_algorithms/Kokkos_TransformReduce.hpp"
+#include "std_algorithms/Kokkos_ExclusiveScan.hpp"
+#include "std_algorithms/Kokkos_TransformExclusiveScan.hpp"
+#include "std_algorithms/Kokkos_InclusiveScan.hpp"
+#include "std_algorithms/Kokkos_TransformInclusiveScan.hpp"
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_STD_ALGORITHMS
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_STD_ALGORITHMS
+#endif
+#endif
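
Everything this umbrella header pulls in lives in namespace Kokkos::Experimental, and each algorithm comes in iterator and rank-1 View overloads, each with and without a string label. A minimal sketch, assuming the default execution space:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    namespace KE = Kokkos::Experimental;

    void std_algorithms_example() {
      Kokkos::View<double*> v("v", 100);
      Kokkos::DefaultExecutionSpace exec;

      KE::fill(exec, KE::begin(v), KE::end(v), 3.0);         // iterator overload
      const auto n  = KE::count(exec, v, 3.0);               // View overload
      const auto it = KE::min_element("my_label", exec, v);  // labeled overload
      (void)n;
      (void)it;
    }
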
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_AdjacentDifference.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_AdjacentDifference.hpp
new file mode 100644 (file)
index 0000000..0a7cf06
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_ADJACENT_DIFFERENCE_HPP
+#define KOKKOS_STD_ALGORITHMS_ADJACENT_DIFFERENCE_HPP
+
+#include "impl/Kokkos_AdjacentDifference.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType>
+std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value,
+                 OutputIteratorType>
+adjacent_difference(const ExecutionSpace& ex, InputIteratorType first_from,
+                    InputIteratorType last_from,
+                    OutputIteratorType first_dest) {
+  using value_type1 = typename InputIteratorType::value_type;
+  using value_type2 = typename OutputIteratorType::value_type;
+  using binary_op =
+      Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
+                                                        value_type2>;
+  return Impl::adjacent_difference_impl(
+      "Kokkos::adjacent_difference_iterator_api", ex, first_from, last_from,
+      first_dest, binary_op());
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOp>
+std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value,
+                 OutputIteratorType>
+adjacent_difference(const ExecutionSpace& ex, InputIteratorType first_from,
+                    InputIteratorType last_from, OutputIteratorType first_dest,
+                    BinaryOp bin_op) {
+  return Impl::adjacent_difference_impl(
+      "Kokkos::adjacent_difference_iterator_api", ex, first_from, last_from,
+      first_dest, bin_op);
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType>
+std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value,
+                 OutputIteratorType>
+adjacent_difference(const std::string& label, const ExecutionSpace& ex,
+                    InputIteratorType first_from, InputIteratorType last_from,
+                    OutputIteratorType first_dest) {
+  using value_type1 = typename InputIteratorType::value_type;
+  using value_type2 = typename OutputIteratorType::value_type;
+  using binary_op =
+      Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
+                                                        value_type2>;
+  return Impl::adjacent_difference_impl(label, ex, first_from, last_from,
+                                        first_dest, binary_op());
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOp>
+std::enable_if_t<!::Kokkos::is_view<InputIteratorType>::value,
+                 OutputIteratorType>
+adjacent_difference(const std::string& label, const ExecutionSpace& ex,
+                    InputIteratorType first_from, InputIteratorType last_from,
+                    OutputIteratorType first_dest, BinaryOp bin_op) {
+  return Impl::adjacent_difference_impl(label, ex, first_from, last_from,
+                                        first_dest, bin_op);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto adjacent_difference(
+    const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
+  namespace KE = ::Kokkos::Experimental;
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+  using view_type1  = ::Kokkos::View<DataType1, Properties1...>;
+  using view_type2  = ::Kokkos::View<DataType2, Properties2...>;
+  using value_type1 = typename view_type1::value_type;
+  using value_type2 = typename view_type2::value_type;
+  using binary_op =
+      Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
+                                                        value_type2>;
+  return Impl::adjacent_difference_impl(
+      "Kokkos::adjacent_difference_view_api", ex, KE::cbegin(view_from),
+      KE::cend(view_from), KE::begin(view_dest), binary_op());
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryOp>
+auto adjacent_difference(
+    const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+    BinaryOp bin_op) {
+  namespace KE = ::Kokkos::Experimental;
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  return Impl::adjacent_difference_impl(
+      "Kokkos::adjacent_difference_view_api", ex, KE::cbegin(view_from),
+      KE::cend(view_from), KE::begin(view_dest), bin_op);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto adjacent_difference(
+    const std::string& label, const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
+  namespace KE = ::Kokkos::Experimental;
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+  using view_type1  = ::Kokkos::View<DataType1, Properties1...>;
+  using view_type2  = ::Kokkos::View<DataType2, Properties2...>;
+  using value_type1 = typename view_type1::value_type;
+  using value_type2 = typename view_type2::value_type;
+  using binary_op =
+      Impl::StdAdjacentDifferenceDefaultBinaryOpFunctor<value_type1,
+                                                        value_type2>;
+
+  return Impl::adjacent_difference_impl(label, ex, KE::cbegin(view_from),
+                                        KE::cend(view_from),
+                                        KE::begin(view_dest), binary_op());
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryOp>
+auto adjacent_difference(
+    const std::string& label, const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+    BinaryOp bin_op) {
+  namespace KE = ::Kokkos::Experimental;
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  return Impl::adjacent_difference_impl(label, ex, KE::cbegin(view_from),
+                                        KE::cend(view_from),
+                                        KE::begin(view_dest), bin_op);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
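
A minimal sketch of the View overloads above, assuming distinct, non-overlapping source and destination views; the second call swaps the default difference for a device-callable binary op:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    namespace KE = Kokkos::Experimental;

    void adjacent_difference_example() {
      Kokkos::View<double*> in("in", 10), out("out", 10);
      Kokkos::DefaultExecutionSpace exec;

      // out(0) = in(0); out(i) = in(i) - in(i - 1) for i > 0.
      KE::adjacent_difference(exec, in, out);

      // Same traversal, but adjacent sums instead of differences.
      KE::adjacent_difference(exec, in, out,
                              KOKKOS_LAMBDA(double cur, double prev) {
                                return cur + prev;
                              });
    }
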
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_AdjacentFind.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_AdjacentFind.hpp
new file mode 100644 (file)
index 0000000..332f9dd
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_ADJACENT_FIND_HPP
+#define KOKKOS_STD_ALGORITHMS_ADJACENT_FIND_HPP
+
+#include "impl/Kokkos_AdjacentFind.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// overload set 1
+template <class ExecutionSpace, class IteratorType>
+IteratorType adjacent_find(const ExecutionSpace& ex, IteratorType first,
+                           IteratorType last) {
+  return Impl::adjacent_find_impl("Kokkos::adjacent_find_iterator_api_default",
+                                  ex, first, last);
+}
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType adjacent_find(const std::string& label, const ExecutionSpace& ex,
+                           IteratorType first, IteratorType last) {
+  return Impl::adjacent_find_impl(label, ex, first, last);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto adjacent_find(const ExecutionSpace& ex,
+                   const ::Kokkos::View<DataType, Properties...>& v) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::adjacent_find_impl("Kokkos::adjacent_find_view_api_default", ex,
+                                  KE::begin(v), KE::end(v));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto adjacent_find(const std::string& label, const ExecutionSpace& ex,
+                   const ::Kokkos::View<DataType, Properties...>& v) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::adjacent_find_impl(label, ex, KE::begin(v), KE::end(v));
+}
+
+// overload set 2
+template <class ExecutionSpace, class IteratorType, class BinaryPredicateType>
+IteratorType adjacent_find(const ExecutionSpace& ex, IteratorType first,
+                           IteratorType last, BinaryPredicateType pred) {
+  return Impl::adjacent_find_impl("Kokkos::adjacent_find_iterator_api_default",
+                                  ex, first, last, pred);
+}
+
+template <class ExecutionSpace, class IteratorType, class BinaryPredicateType>
+IteratorType adjacent_find(const std::string& label, const ExecutionSpace& ex,
+                           IteratorType first, IteratorType last,
+                           BinaryPredicateType pred) {
+  return Impl::adjacent_find_impl(label, ex, first, last, pred);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class BinaryPredicateType>
+auto adjacent_find(const ExecutionSpace& ex,
+                   const ::Kokkos::View<DataType, Properties...>& v,
+                   BinaryPredicateType pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::adjacent_find_impl("Kokkos::adjacent_find_view_api_default", ex,
+                                  KE::begin(v), KE::end(v), pred);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class BinaryPredicateType>
+auto adjacent_find(const std::string& label, const ExecutionSpace& ex,
+                   const ::Kokkos::View<DataType, Properties...>& v,
+                   BinaryPredicateType pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::adjacent_find_impl(label, ex, KE::begin(v), KE::end(v), pred);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
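
A minimal sketch of the two overload sets above: set 1 looks for the first pair of equal neighbors, set 2 for the first neighboring pair satisfying a device-callable predicate:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    namespace KE = Kokkos::Experimental;

    void adjacent_find_example() {
      Kokkos::View<int*> v("v", 100);
      Kokkos::DefaultExecutionSpace exec;

      // First i with v(i) == v(i + 1); returns KE::end(v) if there is none.
      const auto eq = KE::adjacent_find(exec, v);

      // First strictly decreasing neighboring pair.
      const auto dec = KE::adjacent_find(
          exec, v, KOKKOS_LAMBDA(int a, int b) { return a > b; });
      (void)eq;
      (void)dec;
    }
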
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_AllOf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_AllOf.hpp
new file mode 100644 (file)
index 0000000..66a4954
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_ALL_OF_HPP
+#define KOKKOS_STD_ALGORITHMS_ALL_OF_HPP
+
+#include "impl/Kokkos_AllOfAnyOfNoneOf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class Predicate>
+bool all_of(const ExecutionSpace& ex, InputIterator first, InputIterator last,
+            Predicate predicate) {
+  return Impl::all_of_impl("Kokkos::all_of_iterator_api_default", ex, first,
+                           last, predicate);
+}
+
+template <class ExecutionSpace, class InputIterator, class Predicate>
+bool all_of(const std::string& label, const ExecutionSpace& ex,
+            InputIterator first, InputIterator last, Predicate predicate) {
+  return Impl::all_of_impl(label, ex, first, last, predicate);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Predicate>
+bool all_of(const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& v,
+            Predicate predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::all_of_impl("Kokkos::all_of_view_api_default", ex, KE::cbegin(v),
+                           KE::cend(v), std::move(predicate));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Predicate>
+bool all_of(const std::string& label, const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& v,
+            Predicate predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::all_of_impl(label, ex, KE::cbegin(v), KE::cend(v),
+                           std::move(predicate));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
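
A minimal sketch of the View overload above; the predicate must be callable on the device:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    namespace KE = Kokkos::Experimental;

    void all_of_example() {
      Kokkos::View<double*> v("v", 100);
      Kokkos::DefaultExecutionSpace exec;

      // True iff every element is non-negative.
      const bool ok =
          KE::all_of(exec, v, KOKKOS_LAMBDA(double x) { return x >= 0.0; });
      (void)ok;
    }
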
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_AnyOf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_AnyOf.hpp
new file mode 100644 (file)
index 0000000..e50e90f
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_ANY_OF_HPP
+#define KOKKOS_STD_ALGORITHMS_ANY_OF_HPP
+
+#include "impl/Kokkos_AllOfAnyOfNoneOf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class Predicate>
+bool any_of(const ExecutionSpace& ex, InputIterator first, InputIterator last,
+            Predicate predicate) {
+  return Impl::any_of_impl("Kokkos::any_of_view_api_default", ex, first, last,
+                           predicate);
+}
+
+template <class ExecutionSpace, class InputIterator, class Predicate>
+bool any_of(const std::string& label, const ExecutionSpace& ex,
+            InputIterator first, InputIterator last, Predicate predicate) {
+  return Impl::any_of_impl(label, ex, first, last, predicate);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Predicate>
+bool any_of(const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& v,
+            Predicate predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::any_of_impl("Kokkos::any_of_view_api_default", ex, KE::cbegin(v),
+                           KE::cend(v), std::move(predicate));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Predicate>
+bool any_of(const std::string& label, const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& v,
+            Predicate predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::any_of_impl(label, ex, KE::cbegin(v), KE::cend(v),
+                           std::move(predicate));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
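By symmetry with all_of, a short sketch of the any_of View overload; the helper name is illustrative and an initialized Kokkos runtime is assumed:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Illustrative helper; assumes Kokkos::initialize() was already called.
    bool contains_negative(const Kokkos::View<int*>& v) {
      namespace KE = Kokkos::Experimental;
      // True if at least one element satisfies the predicate.
      return KE::any_of(Kokkos::DefaultExecutionSpace(), v,
                        KOKKOS_LAMBDA(int x) { return x < 0; });
    }
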
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_BeginEnd.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_BeginEnd.hpp
new file mode 100644
index 0000000..5449196
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_BEGIN_END_HPP
+#define KOKKOS_BEGIN_END_HPP
+
+#include <Kokkos_View.hpp>
+#include "impl/Kokkos_RandomAccessIterator.hpp"
+#include "impl/Kokkos_Constraints.hpp"
+
+/// \file Kokkos_BeginEnd.hpp
+/// \brief Kokkos begin, end, cbegin, cend
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class DataType, class... Properties>
+KOKKOS_INLINE_FUNCTION auto begin(
+    const Kokkos::View<DataType, Properties...>& v) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  using it_t =
+      Impl::RandomAccessIterator<Kokkos::View<DataType, Properties...>>;
+  return it_t(v);
+}
+
+template <class DataType, class... Properties>
+KOKKOS_INLINE_FUNCTION auto end(
+    const Kokkos::View<DataType, Properties...>& v) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  using it_t =
+      Impl::RandomAccessIterator<Kokkos::View<DataType, Properties...>>;
+  return it_t(v, v.extent(0));
+}
+
+template <class DataType, class... Properties>
+KOKKOS_INLINE_FUNCTION auto cbegin(
+    const Kokkos::View<DataType, Properties...>& v) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  using ViewConstType =
+      typename Kokkos::View<DataType, Properties...>::const_type;
+  const ViewConstType cv = v;
+  using it_t             = Impl::RandomAccessIterator<ViewConstType>;
+  return it_t(cv);
+}
+
+template <class DataType, class... Properties>
+KOKKOS_INLINE_FUNCTION auto cend(
+    const Kokkos::View<DataType, Properties...>& v) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  using ViewConstType =
+      typename Kokkos::View<DataType, Properties...>::const_type;
+  const ViewConstType cv = v;
+  using it_t             = Impl::RandomAccessIterator<ViewConstType>;
+  return it_t(cv, cv.extent(0));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
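These begin/end/cbegin/cend free functions are what the View overloads of the algorithms above are built on; a sketch, assuming an initialized Kokkos runtime (the helper name is illustrative):

    #include <cstddef>
    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Illustrative helper; assumes an initialized Kokkos runtime.
    std::size_t extent_via_iterators(const Kokkos::View<double*>& v) {
      namespace KE = Kokkos::Experimental;
      auto first  = KE::begin(v);   // mutable random-access iterator
      auto last   = KE::end(v);     // one past the final element
      auto cfirst = KE::cbegin(v);  // iterator over View<const double*>
      (void)cfirst;
      return static_cast<std::size_t>(last - first);  // equals v.extent(0)
    }
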
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Copy.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Copy.hpp
new file mode 100644
index 0000000..b323704
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_HPP
+
+#include "impl/Kokkos_CopyCopyN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator copy(const ExecutionSpace& ex, InputIterator first,
+                    InputIterator last, OutputIterator d_first) {
+  return Impl::copy_impl("Kokkos::copy_iterator_api_default", ex, first, last,
+                         d_first);
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator copy(const std::string& label, const ExecutionSpace& ex,
+                    InputIterator first, InputIterator last,
+                    OutputIterator d_first) {
+  return Impl::copy_impl(label, ex, first, last, d_first);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto copy(const ExecutionSpace& ex,
+          const ::Kokkos::View<DataType1, Properties1...>& source,
+          ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::copy_impl("Kokkos::copy_view_api_default", ex,
+                         KE::cbegin(source), KE::cend(source), KE::begin(dest));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto copy(const std::string& label, const ExecutionSpace& ex,
+          const ::Kokkos::View<DataType1, Properties1...>& source,
+          ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::copy_impl(label, ex, KE::cbegin(source), KE::cend(source),
+                         KE::begin(dest));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
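A minimal sketch of the copy View overload, assuming an initialized Kokkos runtime and a destination at least as long as the source (the helper name is illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Illustrative helper; assumes an initialized Kokkos runtime and
    // dst.extent(0) >= src.extent(0). Note dest is a non-const reference.
    void copy_all(const Kokkos::View<int*>& src, Kokkos::View<int*>& dst) {
      namespace KE = Kokkos::Experimental;
      // Returns an iterator one past the last element written into dst.
      auto it = KE::copy(Kokkos::DefaultExecutionSpace(), src, dst);
      (void)it;
    }
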
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_CopyBackward.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_CopyBackward.hpp
new file mode 100644
index 0000000..83efd96
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_BACKWARD_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_BACKWARD_HPP
+
+#include "impl/Kokkos_CopyBackward.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType2 copy_backward(const ExecutionSpace& ex, IteratorType1 first,
+                            IteratorType1 last, IteratorType2 d_last) {
+  return Impl::copy_backward_impl("Kokkos::copy_backward_iterator_api_default",
+                                  ex, first, last, d_last);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType2 copy_backward(const std::string& label, const ExecutionSpace& ex,
+                            IteratorType1 first, IteratorType1 last,
+                            IteratorType2 d_last) {
+  return Impl::copy_backward_impl(label, ex, first, last, d_last);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto copy_backward(const ExecutionSpace& ex,
+                   const ::Kokkos::View<DataType1, Properties1...>& source,
+                   ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::copy_backward_impl("Kokkos::copy_backward_view_api_default", ex,
+                                  cbegin(source), cend(source), end(dest));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto copy_backward(const std::string& label, const ExecutionSpace& ex,
+                   const ::Kokkos::View<DataType1, Properties1...>& source,
+                   ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::copy_backward_impl(label, ex, cbegin(source), cend(source),
+                                  end(dest));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
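The View overload of copy_backward anchors the copy at end(dest), so the last source element lands at the last destination element; a sketch under the assumption of an initialized runtime and non-overlapping views:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Illustrative helper; assumes an initialized Kokkos runtime and
    // dst.extent(0) >= src.extent(0).
    void copy_to_tail(const Kokkos::View<int*>& src, Kokkos::View<int*>& dst) {
      namespace KE = Kokkos::Experimental;
      // Copies all of src so that its last element lands at the last
      // element of dst; any leading entries of dst are left untouched.
      KE::copy_backward(Kokkos::DefaultExecutionSpace(), src, dst);
    }
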
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_CopyIf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_CopyIf.hpp
new file mode 100644
index 0000000..c83cc29
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_IF_HPP
+
+#include "impl/Kokkos_CopyIf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class Predicate>
+OutputIterator copy_if(const ExecutionSpace& ex, InputIterator first,
+                       InputIterator last, OutputIterator d_first,
+                       Predicate pred) {
+  return Impl::copy_if_impl("Kokkos::copy_if_iterator_api_default", ex, first,
+                            last, d_first, std::move(pred));
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class Predicate>
+OutputIterator copy_if(const std::string& label, const ExecutionSpace& ex,
+                       InputIterator first, InputIterator last,
+                       OutputIterator d_first, Predicate pred) {
+  return Impl::copy_if_impl(label, ex, first, last, d_first, std::move(pred));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class Predicate>
+auto copy_if(const ExecutionSpace& ex,
+             const ::Kokkos::View<DataType1, Properties1...>& source,
+             ::Kokkos::View<DataType2, Properties2...>& dest, Predicate pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::copy_if_impl("Kokkos::copy_if_view_api_default", ex,
+                            cbegin(source), cend(source), begin(dest),
+                            std::move(pred));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class Predicate>
+auto copy_if(const std::string& label, const ExecutionSpace& ex,
+             const ::Kokkos::View<DataType1, Properties1...>& source,
+             ::Kokkos::View<DataType2, Properties2...>& dest, Predicate pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::copy_if_impl(label, ex, cbegin(source), cend(source),
+                            begin(dest), std::move(pred));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
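A sketch of the copy_if View overload, assuming an initialized Kokkos runtime and a destination large enough to hold every matching element (helper name and predicate are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Illustrative helper; assumes an initialized Kokkos runtime and a dst
    // at least as large as src.
    auto keep_even(const Kokkos::View<int*>& src, Kokkos::View<int*>& dst) {
      namespace KE = Kokkos::Experimental;
      // Returns an iterator one past the last element copied into dst.
      return KE::copy_if(Kokkos::DefaultExecutionSpace(), src, dst,
                         KOKKOS_LAMBDA(int x) { return x % 2 == 0; });
    }
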
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_CopyN.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_CopyN.hpp
new file mode 100644
index 0000000..7f3b937
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_N_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_N_HPP
+
+#include "impl/Kokkos_CopyCopyN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class Size,
+          class OutputIterator>
+OutputIterator copy_n(const ExecutionSpace& ex, InputIterator first, Size count,
+                      OutputIterator result) {
+  return Impl::copy_n_impl("Kokkos::copy_n_iterator_api_default", ex, first,
+                           count, result);
+}
+
+template <class ExecutionSpace, class InputIterator, class Size,
+          class OutputIterator>
+OutputIterator copy_n(const std::string& label, const ExecutionSpace& ex,
+                      InputIterator first, Size count, OutputIterator result) {
+  return Impl::copy_n_impl(label, ex, first, count, result);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class Size, class DataType2, class... Properties2>
+auto copy_n(const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType1, Properties1...>& source, Size count,
+            ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::copy_n_impl("Kokkos::copy_n_view_api_default", ex,
+                           KE::cbegin(source), count, KE::begin(dest));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class Size, class DataType2, class... Properties2>
+auto copy_n(const std::string& label, const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType1, Properties1...>& source, Size count,
+            ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::copy_n_impl(label, ex, KE::cbegin(source), count,
+                           KE::begin(dest));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
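A sketch of the copy_n View overload, which copies a prefix of the source; assumes an initialized Kokkos runtime and a valid count (helper name is illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Illustrative helper; assumes an initialized Kokkos runtime and
    // 0 <= count <= min(src.extent(0), dst.extent(0)).
    void copy_prefix(const Kokkos::View<int*>& src, Kokkos::View<int*>& dst,
                     int count) {
      namespace KE = Kokkos::Experimental;
      // Copies the first `count` elements of src into dst.
      KE::copy_n(Kokkos::DefaultExecutionSpace(), src, count, dst);
    }
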
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Count.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Count.hpp
new file mode 100644
index 0000000..a885ee4
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_COUNT_HPP
+#define KOKKOS_STD_ALGORITHMS_COUNT_HPP
+
+#include "impl/Kokkos_CountCountIf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class T>
+typename IteratorType::difference_type count(const ExecutionSpace& ex,
+                                             IteratorType first,
+                                             IteratorType last,
+                                             const T& value) {
+  return Impl::count_impl("Kokkos::count_iterator_api_default", ex, first, last,
+                          value);
+}
+
+template <class ExecutionSpace, class IteratorType, class T>
+typename IteratorType::difference_type count(const std::string& label,
+                                             const ExecutionSpace& ex,
+                                             IteratorType first,
+                                             IteratorType last,
+                                             const T& value) {
+  return Impl::count_impl(label, ex, first, last, value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties, class T>
+auto count(const ExecutionSpace& ex,
+           const ::Kokkos::View<DataType, Properties...>& v, const T& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::count_impl("Kokkos::count_view_api_default", ex, KE::cbegin(v),
+                          KE::cend(v), value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties, class T>
+auto count(const std::string& label, const ExecutionSpace& ex,
+           const ::Kokkos::View<DataType, Properties...>& v, const T& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::count_impl(label, ex, KE::cbegin(v), KE::cend(v), value);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
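A minimal sketch of the count View overload, assuming an initialized Kokkos runtime (the helper name and the value 7 are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Illustrative helper; assumes an initialized Kokkos runtime.
    auto occurrences_of_seven(const Kokkos::View<int*>& v) {
      namespace KE = Kokkos::Experimental;
      // Returns the iterator difference_type (a signed integer count).
      return KE::count(Kokkos::DefaultExecutionSpace(), v, 7);
    }
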
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_CountIf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_CountIf.hpp
new file mode 100644
index 0000000..98b9d74
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_COUNT_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_COUNT_IF_HPP
+
+#include "impl/Kokkos_CountCountIf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class Predicate>
+typename IteratorType::difference_type count_if(const ExecutionSpace& ex,
+                                                IteratorType first,
+                                                IteratorType last,
+                                                Predicate predicate) {
+  return Impl::count_if_impl("Kokkos::count_if_iterator_api_default", ex, first,
+                             last, std::move(predicate));
+}
+
+template <class ExecutionSpace, class IteratorType, class Predicate>
+typename IteratorType::difference_type count_if(const std::string& label,
+                                                const ExecutionSpace& ex,
+                                                IteratorType first,
+                                                IteratorType last,
+                                                Predicate predicate) {
+  return Impl::count_if_impl(label, ex, first, last, std::move(predicate));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Predicate>
+auto count_if(const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType, Properties...>& v,
+              Predicate predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::count_if_impl("Kokkos::count_if_view_api_default", ex,
+                             KE::cbegin(v), KE::cend(v), std::move(predicate));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Predicate>
+auto count_if(const std::string& label, const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType, Properties...>& v,
+              Predicate predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::count_if_impl(label, ex, KE::cbegin(v), KE::cend(v),
+                             std::move(predicate));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
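A sketch of the count_if View overload with a predicate that captures a runtime parameter by value (KOKKOS_LAMBDA captures by copy); assumes an initialized Kokkos runtime:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Illustrative helper; assumes an initialized Kokkos runtime.
    auto count_above_threshold(const Kokkos::View<double*>& v,
                               double threshold) {
      namespace KE = Kokkos::Experimental;
      // threshold is captured by value into the device-callable predicate.
      return KE::count_if(Kokkos::DefaultExecutionSpace(), v,
                          KOKKOS_LAMBDA(double x) { return x > threshold; });
    }
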
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Distance.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Distance.hpp
new file mode 100644
index 0000000..4e14864
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_DISTANCE_HPP
+#define KOKKOS_STD_ALGORITHMS_DISTANCE_HPP
+
+#include "impl/Kokkos_Constraints.hpp"
+#include "impl/Kokkos_RandomAccessIterator.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class IteratorType>
+KOKKOS_INLINE_FUNCTION constexpr typename IteratorType::difference_type
+distance(IteratorType first, IteratorType last) {
+  static_assert(
+      ::Kokkos::Experimental::Impl::are_random_access_iterators<
+          IteratorType>::value,
+      "Kokkos::Experimental::distance: only implemented for random access "
+      "iterators.");
+
+  return last - first;
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
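distance simply subtracts random-access iterators, as the static_assert above enforces; a host-side sketch, assuming an initialized Kokkos runtime:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Illustrative helper; assumes an initialized Kokkos runtime.
    auto view_length(const Kokkos::View<float*>& v) {
      namespace KE = Kokkos::Experimental;
      // distance is constexpr and KOKKOS_INLINE_FUNCTION, so it is also
      // callable from device code; here it is used on the host.
      return KE::distance(KE::begin(v), KE::end(v));  // equals v.extent(0)
    }
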
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Equal.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Equal.hpp
new file mode 100644
index 0000000..8634019
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_EQUAL_HPP
+#define KOKKOS_STD_ALGORITHMS_EQUAL_HPP
+
+#include "impl/Kokkos_Equal.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
+                      IteratorType1, IteratorType2>::value,
+                  bool>
+equal(const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
+      IteratorType2 first2) {
+  return Impl::equal_impl("Kokkos::equal_iterator_api_default", ex, first1,
+                          last1, first2);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
+                      IteratorType1, IteratorType2>::value,
+                  bool>
+equal(const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+      IteratorType1 last1, IteratorType2 first2) {
+  return Impl::equal_impl(label, ex, first1, last1, first2);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
+                      IteratorType1, IteratorType2>::value,
+                  bool>
+equal(const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
+      IteratorType2 first2, BinaryPredicateType predicate) {
+  return Impl::equal_impl("Kokkos::equal_iterator_api_default", ex, first1,
+                          last1, first2, std::move(predicate));
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
+                      IteratorType1, IteratorType2>::value,
+                  bool>
+equal(const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+      IteratorType1 last1, IteratorType2 first2,
+      BinaryPredicateType predicate) {
+  return Impl::equal_impl(label, ex, first1, last1, first2,
+                          std::move(predicate));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+bool equal(const ExecutionSpace& ex,
+           const ::Kokkos::View<DataType1, Properties1...>& view1,
+           ::Kokkos::View<DataType2, Properties2...>& view2) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::equal_impl("Kokkos::equal_view_api_default", ex,
+                          KE::cbegin(view1), KE::cend(view1),
+                          KE::cbegin(view2));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+bool equal(const std::string& label, const ExecutionSpace& ex,
+           const ::Kokkos::View<DataType1, Properties1...>& view1,
+           ::Kokkos::View<DataType2, Properties2...>& view2) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::equal_impl(label, ex, KE::cbegin(view1), KE::cend(view1),
+                          KE::cbegin(view2));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryPredicateType>
+bool equal(const ExecutionSpace& ex,
+           const ::Kokkos::View<DataType1, Properties1...>& view1,
+           ::Kokkos::View<DataType2, Properties2...>& view2,
+           BinaryPredicateType predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::equal_impl("Kokkos::equal_view_api_default", ex,
+                          KE::cbegin(view1), KE::cend(view1), KE::cbegin(view2),
+                          std::move(predicate));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryPredicateType>
+bool equal(const std::string& label, const ExecutionSpace& ex,
+           const ::Kokkos::View<DataType1, Properties1...>& view1,
+           ::Kokkos::View<DataType2, Properties2...>& view2,
+           BinaryPredicateType predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::equal_impl(label, ex, KE::cbegin(view1), KE::cend(view1),
+                          KE::cbegin(view2), std::move(predicate));
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
+                      IteratorType1, IteratorType2>::value,
+                  bool>
+equal(const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
+      IteratorType2 first2, IteratorType2 last2) {
+  return Impl::equal_impl("Kokkos::equal_iterator_api_default", ex, first1,
+                          last1, first2, last2);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
+                      IteratorType1, IteratorType2>::value,
+                  bool>
+equal(const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+      IteratorType1 last1, IteratorType2 first2, IteratorType2 last2) {
+  return Impl::equal_impl(label, ex, first1, last1, first2, last2);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
+                      IteratorType1, IteratorType2>::value,
+                  bool>
+equal(const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
+      IteratorType2 first2, IteratorType2 last2,
+      BinaryPredicateType predicate) {
+  return Impl::equal_impl("Kokkos::equal_iterator_api_default", ex, first1,
+                          last1, first2, last2, std::move(predicate));
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
+                      IteratorType1, IteratorType2>::value,
+                  bool>
+equal(const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+      IteratorType1 last1, IteratorType2 first2, IteratorType2 last2,
+      BinaryPredicateType predicate) {
+  return Impl::equal_impl(label, ex, first1, last1, first2, last2,
+                          std::move(predicate));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
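A sketch of the equal View overload with a custom binary predicate, assuming an initialized Kokkos runtime and views of equal extent. Note that, per the signatures above, view2 is taken by non-const reference; the tolerance comparison is written without library math calls so the sketch stays self-contained:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Illustrative helper; assumes an initialized Kokkos runtime and
    // a.extent(0) == b.extent(0).
    bool approx_equal(const Kokkos::View<double*>& a,
                      Kokkos::View<double*>& b, double tol) {
      namespace KE = Kokkos::Experimental;
      return KE::equal(Kokkos::DefaultExecutionSpace(), a, b,
                       KOKKOS_LAMBDA(double x, double y) {
                         const double d = x - y;  // |d| < tol, without fabs
                         return d < tol && -d < tol;
                       });
    }
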
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ExclusiveScan.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ExclusiveScan.hpp
new file mode 100644
index 0000000..b97710f
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_EXCLUSIVE_SCAN_HPP
+#define KOKKOS_STD_ALGORITHMS_EXCLUSIVE_SCAN_HPP
+
+#include "impl/Kokkos_ExclusiveScan.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// overload set 1
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class ValueType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+exclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
+               InputIteratorType last, OutputIteratorType first_dest,
+               ValueType init_value) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+  return Impl::exclusive_scan_default_op_impl(
+      "Kokkos::exclusive_scan_default_functors_iterator_api", ex, first, last,
+      first_dest, init_value);
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class ValueType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+exclusive_scan(const std::string& label, const ExecutionSpace& ex,
+               InputIteratorType first, InputIteratorType last,
+               OutputIteratorType first_dest, ValueType init_value) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+  return Impl::exclusive_scan_default_op_impl(label, ex, first, last,
+                                              first_dest, init_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType>
+auto exclusive_scan(const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                    ValueType init_value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::exclusive_scan_default_op_impl(
+      "Kokkos::exclusive_scan_default_functors_view_api", ex,
+      KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+      init_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType>
+auto exclusive_scan(const std::string& label, const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                    ValueType init_value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::exclusive_scan_default_op_impl(label, ex, KE::cbegin(view_from),
+                                              KE::cend(view_from),
+                                              KE::begin(view_dest), init_value);
+}
+
+// overload set 2
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class ValueType, class BinaryOpType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+exclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
+               InputIteratorType last, OutputIteratorType first_dest,
+               ValueType init_value, BinaryOpType bop) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+  return Impl::exclusive_scan_custom_op_impl(
+      "Kokkos::exclusive_scan_custom_functors_iterator_api", ex, first, last,
+      first_dest, init_value, bop);
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class ValueType, class BinaryOpType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+exclusive_scan(const std::string& label, const ExecutionSpace& ex,
+               InputIteratorType first, InputIteratorType last,
+               OutputIteratorType first_dest, ValueType init_value,
+               BinaryOpType bop) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+  return Impl::exclusive_scan_custom_op_impl(label, ex, first, last, first_dest,
+                                             init_value, bop);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType,
+          class BinaryOpType>
+auto exclusive_scan(const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                    ValueType init_value, BinaryOpType bop) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::exclusive_scan_custom_op_impl(
+      "Kokkos::exclusive_scan_custom_functors_view_api", ex,
+      KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+      init_value, bop);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType,
+          class BinaryOpType>
+auto exclusive_scan(const std::string& label, const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                    ValueType init_value, BinaryOpType bop) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::exclusive_scan_custom_op_impl(
+      label, ex, KE::cbegin(view_from), KE::cend(view_from),
+      KE::begin(view_dest), init_value, bop);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
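A sketch of the default-op exclusive_scan View overload (overload set 1 above), assuming an initialized Kokkos runtime and a sufficiently large destination:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Illustrative helper; assumes an initialized Kokkos runtime and
    // out.extent(0) >= in.extent(0).
    void prefix_sums(const Kokkos::View<int*>& in,
                     const Kokkos::View<int*>& out) {
      namespace KE = Kokkos::Experimental;
      // With init_value = 0: out(0) = 0 and, for i > 0,
      // out(i) = 0 + in(0) + ... + in(i-1).
      KE::exclusive_scan(Kokkos::DefaultExecutionSpace(), in, out, 0);
    }
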
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Fill.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Fill.hpp
new file mode 100644
index 0000000..200e03b
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FILL_HPP
+#define KOKKOS_STD_ALGORITHMS_FILL_HPP
+
+#include "impl/Kokkos_FillFillN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class T>
+void fill(const ExecutionSpace& ex, IteratorType first, IteratorType last,
+          const T& value) {
+  Impl::fill_impl("Kokkos::fill_iterator_api_default", ex, first, last, value);
+}
+
+template <class ExecutionSpace, class IteratorType, class T>
+void fill(const std::string& label, const ExecutionSpace& ex,
+          IteratorType first, IteratorType last, const T& value) {
+  Impl::fill_impl(label, ex, first, last, value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties, class T>
+void fill(const ExecutionSpace& ex,
+          const ::Kokkos::View<DataType, Properties...>& view, const T& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  Impl::fill_impl("Kokkos::fill_view_api_default", ex, begin(view), end(view),
+                  value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties, class T>
+void fill(const std::string& label, const ExecutionSpace& ex,
+          const ::Kokkos::View<DataType, Properties...>& view, const T& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  Impl::fill_impl(label, ex, begin(view), end(view), value);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
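
A minimal usage sketch of the fill() overloads above (assumes Kokkos is
initialized; the view name and label are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void fill_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<double*> v("v", 10);
      // Set every element of v to 1.5 on the default execution space.
      KE::fill(Kokkos::DefaultExecutionSpace(), v, 1.5);
      // Labeled variant; the label shows up in profiling tools.
      KE::fill("my_fill_label", Kokkos::DefaultExecutionSpace(), v, 0.0);
    }
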
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FillN.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FillN.hpp
new file mode 100644
index 0000000..2e814dc
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FillN.hpp
@@ -0,0 +1,91 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FILL_N_HPP
+#define KOKKOS_STD_ALGORITHMS_FILL_N_HPP
+
+#include "impl/Kokkos_FillFillN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class SizeType, class T>
+IteratorType fill_n(const ExecutionSpace& ex, IteratorType first, SizeType n,
+                    const T& value) {
+  return Impl::fill_n_impl("Kokkos::fill_n_iterator_api_default", ex, first, n,
+                           value);
+}
+
+template <class ExecutionSpace, class IteratorType, class SizeType, class T>
+IteratorType fill_n(const std::string& label, const ExecutionSpace& ex,
+                    IteratorType first, SizeType n, const T& value) {
+  return Impl::fill_n_impl(label, ex, first, n, value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class SizeType, class T>
+auto fill_n(const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& view, SizeType n,
+            const T& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  return Impl::fill_n_impl("Kokkos::fill_n_view_api_default", ex, begin(view),
+                           n, value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class SizeType, class T>
+auto fill_n(const std::string& label, const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& view, SizeType n,
+            const T& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  return Impl::fill_n_impl(label, ex, begin(view), n, value);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
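
A minimal usage sketch of the fill_n() overloads above (assumes Kokkos is
initialized; names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void fill_n_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 10);
      // Write 7 into the first 4 elements only; returns an iterator
      // pointing past the last element written.
      auto it = KE::fill_n(Kokkos::DefaultExecutionSpace(), v, 4, 7);
      (void)it;
    }
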
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Find.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Find.hpp
new file mode 100644
index 0000000..6758f00
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Find.hpp
@@ -0,0 +1,89 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_HPP
+
+#include "impl/Kokkos_FindIfOrNot.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class T>
+InputIterator find(const ExecutionSpace& ex, InputIterator first,
+                   InputIterator last, const T& value) {
+  return Impl::find_impl("Kokkos::find_iterator_api_default", ex, first, last,
+                         value);
+}
+
+template <class ExecutionSpace, class InputIterator, class T>
+InputIterator find(const std::string& label, const ExecutionSpace& ex,
+                   InputIterator first, InputIterator last, const T& value) {
+  return Impl::find_impl(label, ex, first, last, value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties, class T>
+auto find(const ExecutionSpace& ex,
+          const ::Kokkos::View<DataType, Properties...>& view, const T& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_impl("Kokkos::find_view_api_default", ex, KE::begin(view),
+                         KE::end(view), value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties, class T>
+auto find(const std::string& label, const ExecutionSpace& ex,
+          const ::Kokkos::View<DataType, Properties...>& view, const T& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_impl(label, ex, KE::begin(view), KE::end(view), value);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
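
A minimal usage sketch of the find() overloads above (assumes Kokkos is
initialized; names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void find_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 10);
      // Iterator to the first element equal to 42, or KE::end(v) if absent.
      auto it = KE::find(Kokkos::DefaultExecutionSpace(), v, 42);
      if (it != KE::end(v)) {
        // found: the index is it - KE::begin(v)
      }
    }
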
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindEnd.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindEnd.hpp
new file mode 100644
index 0000000..61b54c8
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindEnd.hpp
@@ -0,0 +1,149 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_END_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_END_HPP
+
+#include "impl/Kokkos_FindEnd.hpp"
+#include "Kokkos_Equal.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// overload set 1: no binary predicate passed
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType1 find_end(const ExecutionSpace& ex, IteratorType1 first,
+                       IteratorType1 last, IteratorType2 s_first,
+                       IteratorType2 s_last) {
+  return Impl::find_end_impl("Kokkos::find_end_iterator_api_default", ex, first,
+                             last, s_first, s_last);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType1 find_end(const std::string& label, const ExecutionSpace& ex,
+                       IteratorType1 first, IteratorType1 last,
+                       IteratorType2 s_first, IteratorType2 s_last) {
+  return Impl::find_end_impl(label, ex, first, last, s_first, s_last);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto find_end(const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType1, Properties1...>& view,
+              const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_end_impl("Kokkos::find_end_view_api_default", ex,
+                             KE::begin(view), KE::end(view), KE::begin(s_view),
+                             KE::end(s_view));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto find_end(const std::string& label, const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType1, Properties1...>& view,
+              const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_end_impl(label, ex, KE::begin(view), KE::end(view),
+                             KE::begin(s_view), KE::end(s_view));
+}
+
+// overload set 2: binary predicate passed
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+IteratorType1 find_end(const ExecutionSpace& ex, IteratorType1 first,
+                       IteratorType1 last, IteratorType2 s_first,
+                       IteratorType2 s_last, const BinaryPredicateType& pred) {
+  return Impl::find_end_impl("Kokkos::find_end_iterator_api_default", ex, first,
+                             last, s_first, s_last, pred);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+IteratorType1 find_end(const std::string& label, const ExecutionSpace& ex,
+                       IteratorType1 first, IteratorType1 last,
+                       IteratorType2 s_first, IteratorType2 s_last,
+                       const BinaryPredicateType& pred) {
+  return Impl::find_end_impl(label, ex, first, last, s_first, s_last, pred);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryPredicateType>
+auto find_end(const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType1, Properties1...>& view,
+              const ::Kokkos::View<DataType2, Properties2...>& s_view,
+              const BinaryPredicateType& pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_end_impl("Kokkos::find_end_view_api_default", ex,
+                             KE::begin(view), KE::end(view), KE::begin(s_view),
+                             KE::end(s_view), pred);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryPredicateType>
+auto find_end(const std::string& label, const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType1, Properties1...>& view,
+              const ::Kokkos::View<DataType2, Properties2...>& s_view,
+              const BinaryPredicateType& pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_end_impl(label, ex, KE::begin(view), KE::end(view),
+                             KE::begin(s_view), KE::end(s_view), pred);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
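
A minimal usage sketch of the find_end() overloads above (assumes Kokkos is
initialized and that v and s hold meaningful data; names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void find_end_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 16), s("s", 3);
      // ... fill v and s ...
      // Iterator to the start of the *last* occurrence of the subsequence s
      // inside v, or KE::end(v) if s never occurs.
      auto it = KE::find_end(Kokkos::DefaultExecutionSpace(), v, s);
      (void)it;
    }
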
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindFirstOf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindFirstOf.hpp
new file mode 100644
index 0000000..b8c27cb
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindFirstOf.hpp
@@ -0,0 +1,150 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_FIRST_OF_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_FIRST_OF_HPP
+
+#include "impl/Kokkos_FindFirstOf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// overload set 1: no binary predicate passed
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType1 find_first_of(const ExecutionSpace& ex, IteratorType1 first,
+                            IteratorType1 last, IteratorType2 s_first,
+                            IteratorType2 s_last) {
+  return Impl::find_first_of_impl("Kokkos::find_first_of_iterator_api_default",
+                                  ex, first, last, s_first, s_last);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType1 find_first_of(const std::string& label, const ExecutionSpace& ex,
+                            IteratorType1 first, IteratorType1 last,
+                            IteratorType2 s_first, IteratorType2 s_last) {
+  return Impl::find_first_of_impl(label, ex, first, last, s_first, s_last);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto find_first_of(const ExecutionSpace& ex,
+                   const ::Kokkos::View<DataType1, Properties1...>& view,
+                   const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_first_of_impl("Kokkos::find_first_of_view_api_default", ex,
+                                  KE::begin(view), KE::end(view),
+                                  KE::begin(s_view), KE::end(s_view));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto find_first_of(const std::string& label, const ExecutionSpace& ex,
+                   const ::Kokkos::View<DataType1, Properties1...>& view,
+                   const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_first_of_impl(label, ex, KE::begin(view), KE::end(view),
+                                  KE::begin(s_view), KE::end(s_view));
+}
+
+// overload set 2: binary predicate passed
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+IteratorType1 find_first_of(const ExecutionSpace& ex, IteratorType1 first,
+                            IteratorType1 last, IteratorType2 s_first,
+                            IteratorType2 s_last,
+                            const BinaryPredicateType& pred) {
+  return Impl::find_first_of_impl("Kokkos::find_first_of_iterator_api_default",
+                                  ex, first, last, s_first, s_last, pred);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+IteratorType1 find_first_of(const std::string& label, const ExecutionSpace& ex,
+                            IteratorType1 first, IteratorType1 last,
+                            IteratorType2 s_first, IteratorType2 s_last,
+                            const BinaryPredicateType& pred) {
+  return Impl::find_first_of_impl(label, ex, first, last, s_first, s_last,
+                                  pred);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryPredicateType>
+auto find_first_of(const ExecutionSpace& ex,
+                   const ::Kokkos::View<DataType1, Properties1...>& view,
+                   const ::Kokkos::View<DataType2, Properties2...>& s_view,
+                   const BinaryPredicateType& pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_first_of_impl("Kokkos::find_first_of_view_api_default", ex,
+                                  KE::begin(view), KE::end(view),
+                                  KE::begin(s_view), KE::end(s_view), pred);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryPredicateType>
+auto find_first_of(const std::string& label, const ExecutionSpace& ex,
+                   const ::Kokkos::View<DataType1, Properties1...>& view,
+                   const ::Kokkos::View<DataType2, Properties2...>& s_view,
+                   const BinaryPredicateType& pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_first_of_impl(label, ex, KE::begin(view), KE::end(view),
+                                  KE::begin(s_view), KE::end(s_view), pred);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
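
A minimal usage sketch of the find_first_of() overloads above (assumes Kokkos
is initialized; names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void find_first_of_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 16), candidates("candidates", 3);
      // First element of v equal to *any* element of candidates,
      // or KE::end(v) if there is no match.
      auto it =
          KE::find_first_of(Kokkos::DefaultExecutionSpace(), v, candidates);
      (void)it;
    }
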
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindIf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindIf.hpp
new file mode 100644
index 0000000..54896da
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindIf.hpp
@@ -0,0 +1,95 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_IF_HPP
+
+#include "impl/Kokkos_FindIfOrNot.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class PredicateType>
+IteratorType find_if(const ExecutionSpace& ex, IteratorType first,
+                     IteratorType last, PredicateType predicate) {
+  return Impl::find_if_or_not_impl<true>("Kokkos::find_if_iterator_api_default",
+                                         ex, first, last, std::move(predicate));
+}
+
+template <class ExecutionSpace, class IteratorType, class PredicateType>
+IteratorType find_if(const std::string& label, const ExecutionSpace& ex,
+                     IteratorType first, IteratorType last,
+                     PredicateType predicate) {
+  return Impl::find_if_or_not_impl<true>(label, ex, first, last,
+                                         std::move(predicate));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Predicate>
+auto find_if(const ExecutionSpace& ex,
+             const ::Kokkos::View<DataType, Properties...>& v,
+             Predicate predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_if_or_not_impl<true>("Kokkos::find_if_view_api_default", ex,
+                                         KE::begin(v), KE::end(v),
+                                         std::move(predicate));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Predicate>
+auto find_if(const std::string& label, const ExecutionSpace& ex,
+             const ::Kokkos::View<DataType, Properties...>& v,
+             Predicate predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_if_or_not_impl<true>(label, ex, KE::begin(v), KE::end(v),
+                                         std::move(predicate));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
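
A minimal usage sketch of the find_if() overloads above; the predicate executes
inside a device kernel, hence KOKKOS_LAMBDA (assumes Kokkos is initialized;
names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void find_if_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<double*> v("v", 10);
      // First element strictly greater than 0.5, or KE::end(v).
      auto it = KE::find_if(Kokkos::DefaultExecutionSpace(), v,
                            KOKKOS_LAMBDA(double x) { return x > 0.5; });
      (void)it;
    }
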
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindIfNot.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindIfNot.hpp
new file mode 100644
index 0000000..cfe6bb8
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_FindIfNot.hpp
@@ -0,0 +1,98 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_IF_NOT_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_IF_NOT_HPP
+
+#include "impl/Kokkos_FindIfOrNot.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class Predicate>
+IteratorType find_if_not(const ExecutionSpace& ex, IteratorType first,
+                         IteratorType last, Predicate predicate) {
+  return Impl::find_if_or_not_impl<false>(
+      "Kokkos::find_if_not_iterator_api_default", ex, first, last,
+      std::move(predicate));
+}
+
+template <class ExecutionSpace, class IteratorType, class Predicate>
+IteratorType find_if_not(const std::string& label, const ExecutionSpace& ex,
+                         IteratorType first, IteratorType last,
+                         Predicate predicate) {
+  return Impl::find_if_or_not_impl<false>(label, ex, first, last,
+                                          std::move(predicate));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Predicate>
+auto find_if_not(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& v,
+                 Predicate predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_if_or_not_impl<false>(
+      "Kokkos::find_if_not_view_api_default", ex, KE::begin(v), KE::end(v),
+      std::move(predicate));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Predicate>
+auto find_if_not(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& v,
+                 Predicate predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::find_if_or_not_impl<false>(label, ex, KE::begin(v), KE::end(v),
+                                          std::move(predicate));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
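
A minimal usage sketch of the find_if_not() overloads above; same shape as
find_if, but it returns the first element for which the predicate is false
(assumes Kokkos is initialized; names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void find_if_not_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<double*> v("v", 10);
      // First element failing the predicate x >= 0.0 (i.e. the first
      // negative element), or KE::end(v) if none exists.
      auto it = KE::find_if_not(Kokkos::DefaultExecutionSpace(), v,
                                KOKKOS_LAMBDA(double x) { return x >= 0.0; });
      (void)it;
    }
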
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ForEach.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ForEach.hpp
new file mode 100644
index 0000000..8a2f90e
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ForEach.hpp
@@ -0,0 +1,95 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FOR_EACH_HPP
+#define KOKKOS_STD_ALGORITHMS_FOR_EACH_HPP
+
+#include "impl/Kokkos_ForEachForEachN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class UnaryFunctorType>
+UnaryFunctorType for_each(const std::string& label, const ExecutionSpace& ex,
+                          IteratorType first, IteratorType last,
+                          UnaryFunctorType functor) {
+  return Impl::for_each_impl(label, ex, first, last, std::move(functor));
+}
+
+template <class ExecutionSpace, class IteratorType, class UnaryFunctorType>
+UnaryFunctorType for_each(const ExecutionSpace& ex, IteratorType first,
+                          IteratorType last, UnaryFunctorType functor) {
+  return Impl::for_each_impl("Kokkos::for_each_iterator_api_default", ex, first,
+                             last, std::move(functor));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class UnaryFunctorType>
+UnaryFunctorType for_each(const std::string& label, const ExecutionSpace& ex,
+                          const ::Kokkos::View<DataType, Properties...>& v,
+                          UnaryFunctorType functor) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::for_each_impl(label, ex, KE::begin(v), KE::end(v),
+                             std::move(functor));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class UnaryFunctorType>
+UnaryFunctorType for_each(const ExecutionSpace& ex,
+                          const ::Kokkos::View<DataType, Properties...>& v,
+                          UnaryFunctorType functor) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::for_each_impl("Kokkos::for_each_view_api_default", ex,
+                             KE::begin(v), KE::end(v), std::move(functor));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
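
A minimal usage sketch of the for_each() overloads above; the functor receives
each element by reference and runs in parallel, so iteration order is
unspecified (assumes Kokkos is initialized; names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void for_each_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<double*> v("v", 10);
      // Double every element in place.
      KE::for_each(Kokkos::DefaultExecutionSpace(), v,
                   KOKKOS_LAMBDA(double& x) { x *= 2.0; });
    }
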
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ForEachN.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ForEachN.hpp
new file mode 100644
index 0000000..dd917a3
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ForEachN.hpp
@@ -0,0 +1,96 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FOR_EACH_N_HPP
+#define KOKKOS_STD_ALGORITHMS_FOR_EACH_N_HPP
+
+#include "impl/Kokkos_ForEachForEachN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class SizeType,
+          class UnaryFunctorType>
+IteratorType for_each_n(const std::string& label, const ExecutionSpace& ex,
+                        IteratorType first, SizeType n,
+                        UnaryFunctorType functor) {
+  return Impl::for_each_n_impl(label, ex, first, n, std::move(functor));
+}
+
+template <class ExecutionSpace, class IteratorType, class SizeType,
+          class UnaryFunctorType>
+IteratorType for_each_n(const ExecutionSpace& ex, IteratorType first,
+                        SizeType n, UnaryFunctorType functor) {
+  return Impl::for_each_n_impl("Kokkos::for_each_n_iterator_api_default", ex,
+                               first, n, std::move(functor));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class SizeType, class UnaryFunctorType>
+auto for_each_n(const std::string& label, const ExecutionSpace& ex,
+                const ::Kokkos::View<DataType, Properties...>& v, SizeType n,
+                UnaryFunctorType functor) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::for_each_n_impl(label, ex, KE::begin(v), n, std::move(functor));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class SizeType, class UnaryFunctorType>
+auto for_each_n(const ExecutionSpace& ex,
+                const ::Kokkos::View<DataType, Properties...>& v, SizeType n,
+                UnaryFunctorType functor) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::for_each_n_impl("Kokkos::for_each_n_view_api_default", ex,
+                               KE::begin(v), n, std::move(functor));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
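
A minimal usage sketch of the for_each_n() overloads above (assumes Kokkos is
initialized; names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void for_each_n_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<double*> v("v", 10);
      // Zero only the first 5 elements.
      KE::for_each_n(Kokkos::DefaultExecutionSpace(), v, 5,
                     KOKKOS_LAMBDA(double& x) { x = 0.0; });
    }
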
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Generate.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Generate.hpp
new file mode 100644
index 0000000..955cb42
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Generate.hpp
@@ -0,0 +1,91 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_GENERATE_HPP
+#define KOKKOS_STD_ALGORITHMS_GENERATE_HPP
+
+#include "impl/Kokkos_GenerateGenerateN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class Generator>
+void generate(const ExecutionSpace& ex, IteratorType first, IteratorType last,
+              Generator g) {
+  Impl::generate_impl("Kokkos::generate_iterator_api_default", ex, first, last,
+                      std::move(g));
+}
+
+template <class ExecutionSpace, class IteratorType, class Generator>
+void generate(const std::string& label, const ExecutionSpace& ex,
+              IteratorType first, IteratorType last, Generator g) {
+  Impl::generate_impl(label, ex, first, last, std::move(g));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Generator>
+void generate(const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType, Properties...>& view,
+              Generator g) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  Impl::generate_impl("Kokkos::generate_view_api_default", ex, begin(view),
+                      end(view), std::move(g));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Generator>
+void generate(const std::string& label, const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType, Properties...>& view,
+              Generator g) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  Impl::generate_impl(label, ex, begin(view), end(view), std::move(g));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
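
A minimal usage sketch of the generate() overloads above; the generator takes
no arguments, must be device-callable, and its result is assigned to every
element (assumes Kokkos is initialized; names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void generate_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<double*> v("v", 10);
      KE::generate(Kokkos::DefaultExecutionSpace(), v,
                   KOKKOS_LAMBDA() { return 2.5; });
    }
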
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_GenerateN.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_GenerateN.hpp
new file mode 100644
index 0000000..470edb1
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_GenerateN.hpp
@@ -0,0 +1,93 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_GENERATE_N_HPP
+#define KOKKOS_STD_ALGORITHMS_GENERATE_N_HPP
+
+#include "impl/Kokkos_GenerateGenerateN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class Size, class Generator>
+IteratorType generate_n(const ExecutionSpace& ex, IteratorType first,
+                        Size count, Generator g) {
+  Impl::generate_n_impl("Kokkos::generate_n_iterator_api_default", ex, first,
+                        count, std::move(g));
+  return first + count;
+}
+
+template <class ExecutionSpace, class IteratorType, class Size, class Generator>
+IteratorType generate_n(const std::string& label, const ExecutionSpace& ex,
+                        IteratorType first, Size count, Generator g) {
+  Impl::generate_n_impl(label, ex, first, count, std::move(g));
+  return first + count;
+}
+
+template <class ExecutionSpace, class DataType, class... Properties, class Size,
+          class Generator>
+auto generate_n(const ExecutionSpace& ex,
+                const ::Kokkos::View<DataType, Properties...>& view, Size count,
+                Generator g) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  return Impl::generate_n_impl("Kokkos::generate_n_view_api_default", ex,
+                               begin(view), count, std::move(g));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties, class Size,
+          class Generator>
+auto generate_n(const std::string& label, const ExecutionSpace& ex,
+                const ::Kokkos::View<DataType, Properties...>& view, Size count,
+                Generator g) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  return Impl::generate_n_impl(label, ex, begin(view), count, std::move(g));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
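
A minimal usage sketch of the generate_n() overloads above; note that the
iterator overloads return first + count, matching std::generate_n (assumes
Kokkos is initialized; names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void generate_n_sketch() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<double*> v("v", 10);
      // Only the first 3 elements are written.
      KE::generate_n(Kokkos::DefaultExecutionSpace(), v, 3,
                     KOKKOS_LAMBDA() { return -1.0; });
    }
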
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_InclusiveScan.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_InclusiveScan.hpp
new file mode 100644
index 0000000..c34b5f4
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_InclusiveScan.hpp
@@ -0,0 +1,223 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_INCLUSIVE_SCAN_HPP
+#define KOKKOS_STD_ALGORITHMS_INCLUSIVE_SCAN_HPP
+
+#include "impl/Kokkos_InclusiveScan.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// overload set 1
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+inclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
+               InputIteratorType last, OutputIteratorType first_dest) {
+  return Impl::inclusive_scan_default_op_impl(
+      "Kokkos::inclusive_scan_default_functors_iterator_api", ex, first, last,
+      first_dest);
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+inclusive_scan(const std::string& label, const ExecutionSpace& ex,
+               InputIteratorType first, InputIteratorType last,
+               OutputIteratorType first_dest) {
+  return Impl::inclusive_scan_default_op_impl(label, ex, first, last,
+                                              first_dest);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto inclusive_scan(
+    const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::inclusive_scan_default_op_impl(
+      "Kokkos::inclusive_scan_default_functors_view_api", ex,
+      KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto inclusive_scan(
+    const std::string& label, const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::inclusive_scan_default_op_impl(label, ex, KE::cbegin(view_from),
+                                              KE::cend(view_from),
+                                              KE::begin(view_dest));
+}
+
+// overload set 2 (accepting custom binary op)
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOp>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+inclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
+               InputIteratorType last, OutputIteratorType first_dest,
+               BinaryOp binary_op) {
+  return Impl::inclusive_scan_custom_binary_op_impl(
+      "Kokkos::inclusive_scan_custom_functors_iterator_api", ex, first, last,
+      first_dest, binary_op);
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOp>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+inclusive_scan(const std::string& label, const ExecutionSpace& ex,
+               InputIteratorType first, InputIteratorType last,
+               OutputIteratorType first_dest, BinaryOp binary_op) {
+  return Impl::inclusive_scan_custom_binary_op_impl(label, ex, first, last,
+                                                    first_dest, binary_op);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryOp>
+auto inclusive_scan(const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                    BinaryOp binary_op) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::inclusive_scan_custom_binary_op_impl(
+      "Kokkos::inclusive_scan_custom_functors_view_api", ex,
+      KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+      binary_op);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryOp>
+auto inclusive_scan(const std::string& label, const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                    BinaryOp binary_op) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::inclusive_scan_custom_binary_op_impl(
+      label, ex, KE::cbegin(view_from), KE::cend(view_from),
+      KE::begin(view_dest), binary_op);
+}
+
+// overload set 3 (accepting custom binary op and init value)
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOp, class ValueType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+inclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
+               InputIteratorType last, OutputIteratorType first_dest,
+               BinaryOp binary_op, ValueType init_value) {
+  return Impl::inclusive_scan_custom_binary_op_impl(
+      "Kokkos::inclusive_scan_custom_functors_iterator_api", ex, first, last,
+      first_dest, binary_op, init_value);
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOp, class ValueType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+inclusive_scan(const std::string& label, const ExecutionSpace& ex,
+               InputIteratorType first, InputIteratorType last,
+               OutputIteratorType first_dest, BinaryOp binary_op,
+               ValueType init_value) {
+  return Impl::inclusive_scan_custom_binary_op_impl(
+      label, ex, first, last, first_dest, binary_op, init_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryOp,
+          class ValueType>
+auto inclusive_scan(const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                    BinaryOp binary_op, ValueType init_value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::inclusive_scan_custom_binary_op_impl(
+      "Kokkos::inclusive_scan_custom_functors_view_api", ex,
+      KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+      binary_op, init_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryOp,
+          class ValueType>
+auto inclusive_scan(const std::string& label, const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                    BinaryOp binary_op, ValueType init_value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::inclusive_scan_custom_binary_op_impl(
+      label, ex, KE::cbegin(view_from), KE::cend(view_from),
+      KE::begin(view_dest), binary_op, init_value);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
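
A minimal usage sketch for the three inclusive_scan overload sets above, assuming an initialized Kokkos runtime; the view names, the label, and the SumOp functor are illustrative, not part of the header:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

// Illustrative binary op; any functor with a KOKKOS_FUNCTION call operator works.
struct SumOp {
  KOKKOS_FUNCTION
  double operator()(double a, double b) const { return a + b; }
};

void scan_sketch() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;
  Kokkos::View<double*> in("in", 100), out("out", 100);
  Kokkos::deep_copy(in, 1.0);

  KE::inclusive_scan(ex, in, out);                      // set 1: default op (sum)
  KE::inclusive_scan("my_scan", ex, in, out, SumOp{});  // set 2: custom op, explicit label
  KE::inclusive_scan(ex, in, out, SumOp{}, 10.0);       // set 3: custom op plus init value
}
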
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IsPartitioned.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IsPartitioned.hpp
new file mode 100644
index 0000000..8a2ca20
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IsPartitioned.hpp
@@ -0,0 +1,92 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_IS_PARTITIONED_HPP
+#define KOKKOS_STD_ALGORITHMS_IS_PARTITIONED_HPP
+
+#include "impl/Kokkos_IsPartitioned.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class PredicateType>
+bool is_partitioned(const ExecutionSpace& ex, IteratorType first,
+                    IteratorType last, PredicateType p) {
+  return Impl::is_partitioned_impl(
+      "Kokkos::is_partitioned_iterator_api_default", ex, first, last,
+      std::move(p));
+}
+
+template <class ExecutionSpace, class IteratorType, class PredicateType>
+bool is_partitioned(const std::string& label, const ExecutionSpace& ex,
+                    IteratorType first, IteratorType last, PredicateType p) {
+  return Impl::is_partitioned_impl(label, ex, first, last, std::move(p));
+}
+
+template <class ExecutionSpace, class PredicateType, class DataType,
+          class... Properties>
+bool is_partitioned(const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType, Properties...>& v,
+                    PredicateType p) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  return Impl::is_partitioned_impl("Kokkos::is_partitioned_view_api_default",
+                                   ex, cbegin(v), cend(v), std::move(p));
+}
+
+template <class ExecutionSpace, class PredicateType, class DataType,
+          class... Properties>
+bool is_partitioned(const std::string& label, const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType, Properties...>& v,
+                    PredicateType p) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  return Impl::is_partitioned_impl(label, ex, cbegin(v), cend(v), std::move(p));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
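
A minimal sketch of the is_partitioned interface above (same includes as the inclusive_scan sketch); the predicate is illustrative:

struct IsNegative {
  KOKKOS_FUNCTION
  bool operator()(double x) const { return x < 0.0; }
};

bool partitioned_sketch(const Kokkos::View<double*>& v) {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;
  // true if every element satisfying the predicate precedes every element that does not
  return KE::is_partitioned(ex, v, IsNegative{});
}
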
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IsSorted.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IsSorted.hpp
new file mode 100644
index 0000000..0ab466f
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IsSorted.hpp
@@ -0,0 +1,131 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_IS_SORTED_HPP
+#define KOKKOS_STD_ALGORITHMS_IS_SORTED_HPP
+
+#include "impl/Kokkos_IsSorted.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType>
+bool is_sorted(const ExecutionSpace& ex, IteratorType first,
+               IteratorType last) {
+  return Impl::is_sorted_impl("Kokkos::is_sorted_iterator_api_default", ex,
+                              first, last);
+}
+
+template <class ExecutionSpace, class IteratorType>
+bool is_sorted(const std::string& label, const ExecutionSpace& ex,
+               IteratorType first, IteratorType last) {
+  return Impl::is_sorted_impl(label, ex, first, last);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+bool is_sorted(const ExecutionSpace& ex,
+               const ::Kokkos::View<DataType, Properties...>& view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::is_sorted_impl("Kokkos::is_sorted_view_api_default", ex,
+                              KE::cbegin(view), KE::cend(view));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+bool is_sorted(const std::string& label, const ExecutionSpace& ex,
+               const ::Kokkos::View<DataType, Properties...>& view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::is_sorted_impl(label, ex, KE::cbegin(view), KE::cend(view));
+}
+
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+bool is_sorted(const ExecutionSpace& ex, IteratorType first, IteratorType last,
+               ComparatorType comp) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  return Impl::is_sorted_impl("Kokkos::is_sorted_iterator_api_default", ex,
+                              first, last, std::move(comp));
+}
+
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+bool is_sorted(const std::string& label, const ExecutionSpace& ex,
+               IteratorType first, IteratorType last, ComparatorType comp) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  return Impl::is_sorted_impl(label, ex, first, last, std::move(comp));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class ComparatorType>
+bool is_sorted(const ExecutionSpace& ex,
+               const ::Kokkos::View<DataType, Properties...>& view,
+               ComparatorType comp) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::is_sorted_impl("Kokkos::is_sorted_view_api_default", ex,
+                              KE::cbegin(view), KE::cend(view),
+                              std::move(comp));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class ComparatorType>
+bool is_sorted(const std::string& label, const ExecutionSpace& ex,
+               const ::Kokkos::View<DataType, Properties...>& view,
+               ComparatorType comp) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::is_sorted_impl(label, ex, KE::cbegin(view), KE::cend(view),
+                              std::move(comp));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
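
A sketch covering the default and custom-comparator is_sorted overloads; per the static asserts above, the comparator overloads are rejected at compile time on OpenMPTarget. The comparator name is illustrative:

struct CustomLess {
  KOKKOS_FUNCTION
  bool operator()(double a, double b) const { return a < b; }
};

bool sorted_sketch(const Kokkos::View<double*>& v) {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;
  const bool s1 = KE::is_sorted(ex, v);                            // operator< ordering
  const bool s2 = KE::is_sorted("my_check", ex, v, CustomLess{});  // custom comparator
  return s1 && s2;
}
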
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IsSortedUntil.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IsSortedUntil.hpp
new file mode 100644
index 0000000..c480d9e
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IsSortedUntil.hpp
@@ -0,0 +1,134 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_IS_SORTED_UNTIL_HPP
+#define KOKKOS_STD_ALGORITHMS_IS_SORTED_UNTIL_HPP
+
+#include "impl/Kokkos_IsSortedUntil.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType is_sorted_until(const ExecutionSpace& ex, IteratorType first,
+                             IteratorType last) {
+  return Impl::is_sorted_until_impl(
+      "Kokkos::is_sorted_until_iterator_api_default", ex, first, last);
+}
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType is_sorted_until(const std::string& label, const ExecutionSpace& ex,
+                             IteratorType first, IteratorType last) {
+  return Impl::is_sorted_until_impl(label, ex, first, last);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto is_sorted_until(const ExecutionSpace& ex,
+                     const ::Kokkos::View<DataType, Properties...>& view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::is_sorted_until_impl("Kokkos::is_sorted_until_view_api_default",
+                                    ex, KE::begin(view), KE::end(view));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto is_sorted_until(const std::string& label, const ExecutionSpace& ex,
+                     const ::Kokkos::View<DataType, Properties...>& view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::is_sorted_until_impl(label, ex, KE::begin(view), KE::end(view));
+}
+
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+IteratorType is_sorted_until(const ExecutionSpace& ex, IteratorType first,
+                             IteratorType last, ComparatorType comp) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  return Impl::is_sorted_until_impl(
+      "Kokkos::is_sorted_until_iterator_api_default", ex, first, last,
+      std::move(comp));
+}
+
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+IteratorType is_sorted_until(const std::string& label, const ExecutionSpace& ex,
+                             IteratorType first, IteratorType last,
+                             ComparatorType comp) {
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::is_sorted_until_impl(label, ex, first, last, std::move(comp));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class ComparatorType>
+auto is_sorted_until(const ExecutionSpace& ex,
+                     const ::Kokkos::View<DataType, Properties...>& view,
+                     ComparatorType comp) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::is_sorted_until_impl("Kokkos::is_sorted_until_view_api_default",
+                                    ex, KE::begin(view), KE::end(view),
+                                    std::move(comp));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class ComparatorType>
+auto is_sorted_until(const std::string& label, const ExecutionSpace& ex,
+                     const ::Kokkos::View<DataType, Properties...>& view,
+                     ComparatorType comp) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::is_sorted_until_impl(label, ex, KE::begin(view), KE::end(view),
+                                    std::move(comp));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
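
is_sorted_until returns an iterator past the longest sorted prefix; a sketch converting that iterator to a prefix length with KE::distance (host-side iterator arithmetic only, no dereference):

auto sorted_prefix_length(const Kokkos::View<double*>& v) {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;
  auto it = KE::is_sorted_until(ex, v);   // first element breaking the ordering
  return KE::distance(KE::begin(v), it);  // length of the sorted prefix
}
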
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IterSwap.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IterSwap.hpp
new file mode 100644
index 0000000..1174740
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_IterSwap.hpp
@@ -0,0 +1,92 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_ITER_SWAP_HPP
+#define KOKKOS_STD_ALGORITHMS_ITER_SWAP_HPP
+
+#include <Kokkos_Core.hpp>
+#include "impl/Kokkos_Constraints.hpp"
+#include "Kokkos_Swap.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType1, class IteratorType2>
+struct StdIterSwapFunctor {
+  IteratorType1 m_a;
+  IteratorType2 m_b;
+
+  KOKKOS_FUNCTION
+  void operator()(int i) const {
+    (void)i;
+    ::Kokkos::Experimental::swap(*m_a, *m_b);
+  }
+
+  KOKKOS_FUNCTION
+  StdIterSwapFunctor(IteratorType1 _a, IteratorType2 _b)
+      : m_a(std::move(_a)), m_b(std::move(_b)) {}
+};
+
+template <class IteratorType1, class IteratorType2>
+void iter_swap_impl(IteratorType1 a, IteratorType2 b) {
+  // TODO: is there a better way to do this than a single-iteration kernel?
+  ::Kokkos::parallel_for(
+      1, StdIterSwapFunctor<IteratorType1, IteratorType2>(a, b));
+  Kokkos::DefaultExecutionSpace().fence(
+      "Kokkos::iter_swap: fence after operation");
+}
+}  // namespace Impl
+//----------------------------------------------------------------------------
+
+// iter_swap
+template <class IteratorType1, class IteratorType2>
+void iter_swap(IteratorType1 a, IteratorType2 b) {
+  Impl::iter_swap_impl(a, b);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
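
Unlike the other algorithms here, iter_swap takes no execution space or label: it launches a one-iteration parallel_for on the default execution space and fences. A sketch swapping the first and last entries of a view (assumed to hold at least two elements):

void iter_swap_sketch(const Kokkos::View<double*>& v) {
  namespace KE = Kokkos::Experimental;
  KE::iter_swap(KE::begin(v), KE::end(v) - 1);  // view iterators are random-access
}
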
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_LexicographicalCompare.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_LexicographicalCompare.hpp
new file mode 100644
index 0000000..4704a9e
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_LexicographicalCompare.hpp
@@ -0,0 +1,154 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_LEXICOGRAPHICAL_COMPARE_HPP
+#define KOKKOS_STD_ALGORITHMS_LEXICOGRAPHICAL_COMPARE_HPP
+
+#include "impl/Kokkos_LexicographicalCompare.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+bool lexicographical_compare(const ExecutionSpace& ex, IteratorType1 first1,
+                             IteratorType1 last1, IteratorType2 first2,
+                             IteratorType2 last2) {
+  return Impl::lexicographical_compare_impl(
+      "Kokkos::lexicographical_compare_iterator_api_default", ex, first1, last1,
+      first2, last2);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+bool lexicographical_compare(const std::string& label, const ExecutionSpace& ex,
+                             IteratorType1 first1, IteratorType1 last1,
+                             IteratorType2 first2, IteratorType2 last2) {
+  return Impl::lexicographical_compare_impl(label, ex, first1, last1, first2,
+                                            last2);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+bool lexicographical_compare(
+    const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view1,
+    ::Kokkos::View<DataType2, Properties2...>& view2) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::lexicographical_compare_impl(
+      "Kokkos::lexicographical_compare_view_api_default", ex, KE::cbegin(view1),
+      KE::cend(view1), KE::cbegin(view2), KE::cend(view2));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+bool lexicographical_compare(
+    const std::string& label, const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view1,
+    ::Kokkos::View<DataType2, Properties2...>& view2) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::lexicographical_compare_impl(label, ex, KE::cbegin(view1),
+                                            KE::cend(view1), KE::cbegin(view2),
+                                            KE::cend(view2));
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class ComparatorType>
+bool lexicographical_compare(const ExecutionSpace& ex, IteratorType1 first1,
+                             IteratorType1 last1, IteratorType2 first2,
+                             IteratorType2 last2, ComparatorType comp) {
+  return Impl::lexicographical_compare_impl(
+      "Kokkos::lexicographical_compare_iterator_api_default", ex, first1, last1,
+      first2, last2, comp);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class ComparatorType>
+bool lexicographical_compare(const std::string& label, const ExecutionSpace& ex,
+                             IteratorType1 first1, IteratorType1 last1,
+                             IteratorType2 first2, IteratorType2 last2,
+                             ComparatorType comp) {
+  return Impl::lexicographical_compare_impl(label, ex, first1, last1, first2,
+                                            last2, comp);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ComparatorType>
+bool lexicographical_compare(
+    const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view1,
+    ::Kokkos::View<DataType2, Properties2...>& view2, ComparatorType comp) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::lexicographical_compare_impl(
+      "Kokkos::lexicographical_compare_view_api_default", ex, KE::cbegin(view1),
+      KE::cend(view1), KE::cbegin(view2), KE::cend(view2), comp);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ComparatorType>
+bool lexicographical_compare(
+    const std::string& label, const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view1,
+    ::Kokkos::View<DataType2, Properties2...>& view2, ComparatorType comp) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::lexicographical_compare_impl(label, ex, KE::cbegin(view1),
+                                            KE::cend(view1), KE::cbegin(view2),
+                                            KE::cend(view2), comp);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
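
Note that these view overloads take the second view by non-const reference, so a temporary cannot be passed there. A minimal sketch:

bool lex_sketch(const Kokkos::View<int*>& a, Kokkos::View<int*>& b) {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;
  // true if a compares lexicographically less than b
  return KE::lexicographical_compare(ex, a, b);
}
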
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_MaxElement.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_MaxElement.hpp
new file mode 100644 (file)
index 0000000..5f6e5cb
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_MAX_ELEMENT_HPP
+#define KOKKOS_STD_ALGORITHMS_MAX_ELEMENT_HPP
+
+#include "impl/Kokkos_MinMaxMinmaxElement.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType>
+auto max_element(const ExecutionSpace& ex, IteratorType first,
+                 IteratorType last) {
+  return Impl::min_or_max_element_impl<MaxFirstLoc>(
+      "Kokkos::max_element_iterator_api_default", ex, first, last);
+}
+
+template <class ExecutionSpace, class IteratorType>
+auto max_element(const std::string& label, const ExecutionSpace& ex,
+                 IteratorType first, IteratorType last) {
+  return Impl::min_or_max_element_impl<MaxFirstLoc>(label, ex, first, last);
+}
+
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+auto max_element(const ExecutionSpace& ex, IteratorType first,
+                 IteratorType last, ComparatorType comp) {
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::min_or_max_element_impl<MaxFirstLocCustomComparator>(
+      "Kokkos::max_element_iterator_api_default", ex, first, last,
+      std::move(comp));
+}
+
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+auto max_element(const std::string& label, const ExecutionSpace& ex,
+                 IteratorType first, IteratorType last, ComparatorType comp) {
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::min_or_max_element_impl<MaxFirstLocCustomComparator>(
+      label, ex, first, last, std::move(comp));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto max_element(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& v) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  return Impl::min_or_max_element_impl<MaxFirstLoc>(
+      "Kokkos::max_element_view_api_default", ex, begin(v), end(v));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto max_element(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& v) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  return Impl::min_or_max_element_impl<MaxFirstLoc>(label, ex, begin(v),
+                                                    end(v));
+}
+
+template <class ExecutionSpace, class DataType, class ComparatorType,
+          class... Properties>
+auto max_element(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& v,
+                 ComparatorType comp) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::min_or_max_element_impl<MaxFirstLocCustomComparator>(
+      "Kokkos::max_element_view_api_default", ex, begin(v), end(v),
+      std::move(comp));
+}
+
+template <class ExecutionSpace, class DataType, class ComparatorType,
+          class... Properties>
+auto max_element(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& v,
+                 ComparatorType comp) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::min_or_max_element_impl<MaxFirstLocCustomComparator>(
+      label, ex, begin(v), end(v), std::move(comp));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
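
max_element returns an iterator to the first largest element; dereferencing it on the host is only valid for host-accessible views, so this sketch reports the index instead:

auto max_index_sketch(const Kokkos::View<double*>& v) {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;
  auto it = KE::max_element(ex, v);
  return KE::distance(KE::begin(v), it);  // index of the first largest element
}
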
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_MinElement.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_MinElement.hpp
new file mode 100644 (file)
index 0000000..63cc548
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_MIN_ELEMENT_HPP
+#define KOKKOS_STD_ALGORITHMS_MIN_ELEMENT_HPP
+
+#include "impl/Kokkos_MinMaxMinmaxElement.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType>
+auto min_element(const ExecutionSpace& ex, IteratorType first,
+                 IteratorType last) {
+  return Impl::min_or_max_element_impl<MinFirstLoc>(
+      "Kokkos::min_element_iterator_api_default", ex, first, last);
+}
+
+template <class ExecutionSpace, class IteratorType>
+auto min_element(const std::string& label, const ExecutionSpace& ex,
+                 IteratorType first, IteratorType last) {
+  return Impl::min_or_max_element_impl<MinFirstLoc>(label, ex, first, last);
+}
+
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+auto min_element(const ExecutionSpace& ex, IteratorType first,
+                 IteratorType last, ComparatorType comp) {
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::min_or_max_element_impl<MinFirstLocCustomComparator>(
+      "Kokkos::min_element_iterator_api_default", ex, first, last,
+      std::move(comp));
+}
+
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+auto min_element(const std::string& label, const ExecutionSpace& ex,
+                 IteratorType first, IteratorType last, ComparatorType comp) {
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::min_or_max_element_impl<MinFirstLocCustomComparator>(
+      label, ex, first, last, std::move(comp));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto min_element(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& v) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  return Impl::min_or_max_element_impl<MinFirstLoc>(
+      "Kokkos::min_element_view_api_default", ex, begin(v), end(v));
+}
+
+template <class ExecutionSpace, class DataType, class ComparatorType,
+          class... Properties>
+auto min_element(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& v,
+                 ComparatorType comp) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::min_or_max_element_impl<MinFirstLocCustomComparator>(
+      "Kokkos::min_element_view_api_default", ex, begin(v), end(v),
+      std::move(comp));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto min_element(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& v) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  return Impl::min_or_max_element_impl<MinFirstLoc>(label, ex, begin(v),
+                                                    end(v));
+}
+
+template <class ExecutionSpace, class DataType, class ComparatorType,
+          class... Properties>
+auto min_element(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& v,
+                 ComparatorType comp) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::min_or_max_element_impl<MinFirstLocCustomComparator>(
+      label, ex, begin(v), end(v), std::move(comp));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
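
The min_element counterpart, here with an illustrative custom comparator (again statically rejected on OpenMPTarget):

struct SquaredLess {  // orders by magnitude, avoiding any library abs call
  KOKKOS_FUNCTION
  bool operator()(double a, double b) const { return a * a < b * b; }
};

auto min_abs_index_sketch(const Kokkos::View<double*>& v) {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;
  auto it = KE::min_element(ex, v, SquaredLess{});
  return KE::distance(KE::begin(v), it);  // index of the element smallest in magnitude
}
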
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_MinMaxElement.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_MinMaxElement.hpp
new file mode 100644 (file)
index 0000000..07cdefc
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_MINMAX_ELEMENT_HPP
+#define KOKKOS_STD_ALGORITHMS_MINMAX_ELEMENT_HPP
+
+#include "impl/Kokkos_MinMaxMinmaxElement.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType>
+auto minmax_element(const ExecutionSpace& ex, IteratorType first,
+                    IteratorType last) {
+  return Impl::minmax_element_impl<MinMaxFirstLastLoc>(
+      "Kokkos::minmax_element_iterator_api_default", ex, first, last);
+}
+
+template <class ExecutionSpace, class IteratorType>
+auto minmax_element(const std::string& label, const ExecutionSpace& ex,
+                    IteratorType first, IteratorType last) {
+  return Impl::minmax_element_impl<MinMaxFirstLastLoc>(label, ex, first, last);
+}
+
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+auto minmax_element(const ExecutionSpace& ex, IteratorType first,
+                    IteratorType last, ComparatorType comp) {
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::minmax_element_impl<MinMaxFirstLastLocCustomComparator>(
+      "Kokkos::minmax_element_iterator_api_default", ex, first, last,
+      std::move(comp));
+}
+
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+auto minmax_element(const std::string& label, const ExecutionSpace& ex,
+                    IteratorType first, IteratorType last,
+                    ComparatorType comp) {
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::minmax_element_impl<MinMaxFirstLastLocCustomComparator>(
+      label, ex, first, last, std::move(comp));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto minmax_element(const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType, Properties...>& v) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  return Impl::minmax_element_impl<MinMaxFirstLastLoc>(
+      "Kokkos::minmax_element_view_api_default", ex, begin(v), end(v));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto minmax_element(const std::string& label, const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType, Properties...>& v) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  return Impl::minmax_element_impl<MinMaxFirstLastLoc>(label, ex, begin(v),
+                                                       end(v));
+}
+
+template <class ExecutionSpace, class DataType, class ComparatorType,
+          class... Properties>
+auto minmax_element(const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType, Properties...>& v,
+                    ComparatorType comp) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::minmax_element_impl<MinMaxFirstLastLocCustomComparator>(
+      "Kokkos::minmax_element_view_api_default", ex, begin(v), end(v),
+      std::move(comp));
+}
+
+template <class ExecutionSpace, class DataType, class ComparatorType,
+          class... Properties>
+auto minmax_element(const std::string& label, const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType, Properties...>& v,
+                    ComparatorType comp) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::minmax_element_impl<MinMaxFirstLastLocCustomComparator>(
+      label, ex, begin(v), end(v), std::move(comp));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
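
minmax_element returns a Kokkos::pair of iterators (first smallest element, last largest element, matching the std::minmax_element convention). A sketch mapping both to indices:

auto minmax_indices_sketch(const Kokkos::View<double*>& v) {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace ex;
  auto res   = KE::minmax_element(ex, v);
  auto i_min = KE::distance(KE::begin(v), res.first);   // first smallest element
  auto i_max = KE::distance(KE::begin(v), res.second);  // last largest element
  return Kokkos::make_pair(i_min, i_max);
}
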
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Mismatch.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Mismatch.hpp
new file mode 100644
index 0000000..3418e04
--- /dev/null
+++ b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Mismatch.hpp
@@ -0,0 +1,160 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_MISMATCH_HPP
+#define KOKKOS_STD_ALGORITHMS_MISMATCH_HPP
+
+#include "impl/Kokkos_Mismatch.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// FIXME: add mismatch overloads accepting 3 iterators.
+// An overload consistent with other algorithms:
+//
+// auto mismatch(const ExecSpace& ex, It1 first1, It1 last1, It2 first2) {...}
+//
+// makes the API ambiguous (with the overload accepting views).
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+::Kokkos::pair<IteratorType1, IteratorType2> mismatch(const ExecutionSpace& ex,
+                                                      IteratorType1 first1,
+                                                      IteratorType1 last1,
+                                                      IteratorType2 first2,
+                                                      IteratorType2 last2) {
+  return Impl::mismatch_impl("Kokkos::mismatch_iterator_api_default", ex,
+                             first1, last1, first2, last2);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+::Kokkos::pair<IteratorType1, IteratorType2> mismatch(
+    const ExecutionSpace& ex, IteratorType1 first1, IteratorType1 last1,
+    IteratorType2 first2, IteratorType2 last2,
+    BinaryPredicateType&& predicate) {
+  return Impl::mismatch_impl("Kokkos::mismatch_iterator_api_default", ex,
+                             first1, last1, first2, last2,
+                             std::forward<BinaryPredicateType>(predicate));
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+::Kokkos::pair<IteratorType1, IteratorType2> mismatch(
+    const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+    IteratorType1 last1, IteratorType2 first2, IteratorType2 last2) {
+  return Impl::mismatch_impl(label, ex, first1, last1, first2, last2);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+::Kokkos::pair<IteratorType1, IteratorType2> mismatch(
+    const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+    IteratorType1 last1, IteratorType2 first2, IteratorType2 last2,
+    BinaryPredicateType&& predicate) {
+  return Impl::mismatch_impl(label, ex, first1, last1, first2, last2,
+                             std::forward<BinaryPredicateType>(predicate));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto mismatch(const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType1, Properties1...>& view1,
+              const ::Kokkos::View<DataType2, Properties2...>& view2) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::mismatch_impl("Kokkos::mismatch_view_api_default", ex,
+                             KE::begin(view1), KE::end(view1), KE::begin(view2),
+                             KE::end(view2));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryPredicateType>
+auto mismatch(const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType1, Properties1...>& view1,
+              const ::Kokkos::View<DataType2, Properties2...>& view2,
+              BinaryPredicateType&& predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::mismatch_impl("Kokkos::mismatch_view_api_default", ex,
+                             KE::begin(view1), KE::end(view1), KE::begin(view2),
+                             KE::end(view2),
+                             std::forward<BinaryPredicateType>(predicate));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto mismatch(const std::string& label, const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType1, Properties1...>& view1,
+              const ::Kokkos::View<DataType2, Properties2...>& view2) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::mismatch_impl(label, ex, KE::begin(view1), KE::end(view1),
+                             KE::begin(view2), KE::end(view2));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryPredicateType>
+auto mismatch(const std::string& label, const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType1, Properties1...>& view1,
+              const ::Kokkos::View<DataType2, Properties2...>& view2,
+              BinaryPredicateType&& predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view2);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::mismatch_impl(label, ex, KE::begin(view1), KE::end(view1),
+                             KE::begin(view2), KE::end(view2),
+                             std::forward<BinaryPredicateType>(predicate));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
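
For orientation, a minimal usage sketch of the view-based mismatch overloads defined above. The view names, extents, and fill values are illustrative assumptions, not part of the bundled sources:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace exec;
    Kokkos::View<int*> a("a", 5);
    Kokkos::View<int*> b("b", 5);
    Kokkos::deep_copy(a, 1);
    Kokkos::deep_copy(b, 1);
    Kokkos::deep_copy(Kokkos::subview(b, 3), 2);  // diverge at index 3
    // Returns a Kokkos::pair of iterators to the first mismatching elements.
    auto p = KE::mismatch(exec, a, b);
    // The iterators are random-access, so the mismatch index is recoverable.
    auto index = p.first - KE::begin(a);  // == 3
    (void)index;
  }
  Kokkos::finalize();
  return 0;
}
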
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Move.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Move.hpp
new file mode 100644
index 0000000..c2ce465
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_MOVE_HPP
+#define KOKKOS_STD_ALGORITHMS_MOVE_HPP
+
+#include "impl/Kokkos_Move.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator move(const ExecutionSpace& ex, InputIterator first,
+                    InputIterator last, OutputIterator d_first) {
+  return Impl::move_impl("Kokkos::move_iterator_api_default", ex, first, last,
+                         d_first);
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator move(const std::string& label, const ExecutionSpace& ex,
+                    InputIterator first, InputIterator last,
+                    OutputIterator d_first) {
+  return Impl::move_impl(label, ex, first, last, d_first);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto move(const ExecutionSpace& ex,
+          const ::Kokkos::View<DataType1, Properties1...>& source,
+          ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::move_impl("Kokkos::move_view_api_default", ex, begin(source),
+                         end(source), begin(dest));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto move(const std::string& label, const ExecutionSpace& ex,
+          const ::Kokkos::View<DataType1, Properties1...>& source,
+          ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::move_impl(label, ex, begin(source), end(source), begin(dest));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
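
A minimal sketch of the view overload above, with illustrative names, extents, and fill values; it assumes Kokkos::initialize has already been called:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void move_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;
  Kokkos::View<int*> src("src", 8);
  Kokkos::View<int*> dst("dst", 8);
  Kokkos::deep_copy(src, 7);
  // Moves src into dst; for a trivially copyable element type such as
  // int this is equivalent to a copy. Returns an iterator past the last
  // element written in dst.
  auto last = KE::move(exec, src, dst);
  (void)last;
}
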
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_MoveBackward.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_MoveBackward.hpp
new file mode 100644
index 0000000..f7462d5
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_MOVE_BACKWARD_HPP
+#define KOKKOS_STD_ALGORITHMS_MOVE_BACKWARD_HPP
+
+#include "impl/Kokkos_MoveBackward.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType2 move_backward(const ExecutionSpace& ex, IteratorType1 first,
+                            IteratorType1 last, IteratorType2 d_last) {
+  return Impl::move_backward_impl("Kokkos::move_backward_iterator_api_default",
+                                  ex, first, last, d_last);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto move_backward(const ExecutionSpace& ex,
+                   const ::Kokkos::View<DataType1, Properties1...>& source,
+                   ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::move_backward_impl("Kokkos::move_backward_view_api_default", ex,
+                                  begin(source), end(source), end(dest));
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType2 move_backward(const std::string& label, const ExecutionSpace& ex,
+                            IteratorType1 first, IteratorType1 last,
+                            IteratorType2 d_last) {
+  return Impl::move_backward_impl(label, ex, first, last, d_last);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto move_backward(const std::string& label, const ExecutionSpace& ex,
+                   const ::Kokkos::View<DataType1, Properties1...>& source,
+                   ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::move_backward_impl(label, ex, begin(source), end(source),
+                                  end(dest));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
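
A sketch of the view overload above (illustrative names and values; assumes an initialized Kokkos runtime):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void move_backward_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;
  Kokkos::View<int*> src("src", 8);
  Kokkos::View<int*> dst("dst", 8);
  Kokkos::deep_copy(src, 3);
  // Moves the range in reverse order, writing the last element of src
  // into the last slot of dst. Returns an iterator to the destination
  // element that received src(0), i.e. end(dst) - src.extent(0).
  auto first_written = KE::move_backward(exec, src, dst);
  (void)first_written;
}
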
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_NoneOf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_NoneOf.hpp
new file mode 100644
index 0000000..30ffb52
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_NONE_OF_HPP
+#define KOKKOS_STD_ALGORITHMS_NONE_OF_HPP
+
+#include "impl/Kokkos_AllOfAnyOfNoneOf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class Predicate>
+bool none_of(const ExecutionSpace& ex, IteratorType first, IteratorType last,
+             Predicate predicate) {
+  return Impl::none_of_impl("Kokkos::none_of_iterator_api_default", ex, first,
+                            last, predicate);
+}
+
+template <class ExecutionSpace, class IteratorType, class Predicate>
+bool none_of(const std::string& label, const ExecutionSpace& ex,
+             IteratorType first, IteratorType last, Predicate predicate) {
+  return Impl::none_of_impl(label, ex, first, last, predicate);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Predicate>
+bool none_of(const ExecutionSpace& ex,
+             const ::Kokkos::View<DataType, Properties...>& v,
+             Predicate predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::none_of_impl("Kokkos::none_of_view_api_default", ex,
+                            KE::cbegin(v), KE::cend(v), std::move(predicate));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class Predicate>
+bool none_of(const std::string& label, const ExecutionSpace& ex,
+             const ::Kokkos::View<DataType, Properties...>& v,
+             Predicate predicate) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::none_of_impl(label, ex, KE::cbegin(v), KE::cend(v),
+                            std::move(predicate));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
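
The predicate must be callable on the device, so this illustrative sketch uses a plain functor with a KOKKOS_FUNCTION call operator (names and values are assumptions; Kokkos is presumed initialized):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

struct IsNegative {
  KOKKOS_FUNCTION bool operator()(int x) const { return x < 0; }
};

bool none_of_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;
  Kokkos::View<int*> v("v", 100);
  Kokkos::deep_copy(v, 1);
  // True here: no element satisfies the predicate.
  return KE::none_of(exec, v, IsNegative{});
}
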
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_PartitionCopy.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_PartitionCopy.hpp
new file mode 100644
index 0000000..5b00669
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_PARTITION_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_PARTITION_COPY_HPP
+
+#include "impl/Kokkos_PartitionCopy.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorTrueType, class OutputIteratorFalseType,
+          class PredicateType>
+::Kokkos::pair<OutputIteratorTrueType, OutputIteratorFalseType> partition_copy(
+    const ExecutionSpace& ex, InputIteratorType from_first,
+    InputIteratorType from_last, OutputIteratorTrueType to_first_true,
+    OutputIteratorFalseType to_first_false, PredicateType p) {
+  return Impl::partition_copy_impl(
+      "Kokkos::partition_copy_iterator_api_default", ex, from_first, from_last,
+      to_first_true, to_first_false, std::move(p));
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorTrueType, class OutputIteratorFalseType,
+          class PredicateType>
+::Kokkos::pair<OutputIteratorTrueType, OutputIteratorFalseType> partition_copy(
+    const std::string& label, const ExecutionSpace& ex,
+    InputIteratorType from_first, InputIteratorType from_last,
+    OutputIteratorTrueType to_first_true,
+    OutputIteratorFalseType to_first_false, PredicateType p) {
+  return Impl::partition_copy_impl(label, ex, from_first, from_last,
+                                   to_first_true, to_first_false, std::move(p));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class DataType3,
+          class... Properties3, class PredicateType>
+auto partition_copy(
+    const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest_true,
+    const ::Kokkos::View<DataType3, Properties3...>& view_dest_false,
+    PredicateType p) {
+  return Impl::partition_copy_impl("Kokkos::partition_copy_view_api_default",
+                                   ex, cbegin(view_from), cend(view_from),
+                                   begin(view_dest_true),
+                                   begin(view_dest_false), std::move(p));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class DataType3,
+          class... Properties3, class PredicateType>
+auto partition_copy(
+    const std::string& label, const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest_true,
+    const ::Kokkos::View<DataType3, Properties3...>& view_dest_false,
+    PredicateType p) {
+  return Impl::partition_copy_impl(label, ex, cbegin(view_from),
+                                   cend(view_from), begin(view_dest_true),
+                                   begin(view_dest_false), std::move(p));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
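
A sketch of the view overload above; the functor, names, and values are illustrative, and the snippet assumes an initialized Kokkos runtime:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

struct IsEven {
  KOKKOS_FUNCTION bool operator()(int x) const { return x % 2 == 0; }
};

void partition_copy_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;
  Kokkos::View<int*> from("from", 10);
  Kokkos::View<int*> dest_true("dest_true", 10);
  Kokkos::View<int*> dest_false("dest_false", 10);
  Kokkos::deep_copy(from, 2);  // every element is even
  // Elements satisfying the predicate are copied to dest_true, the rest
  // to dest_false; the returned pair points past the last element
  // written in each destination.
  auto ends = KE::partition_copy(exec, from, dest_true, dest_false, IsEven{});
  auto n_even = ends.first - KE::begin(dest_true);  // == 10
  (void)n_even;
}
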
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_PartitionPoint.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_PartitionPoint.hpp
new file mode 100644
index 0000000..b714d5a
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_PARTITION_POINT_HPP
+#define KOKKOS_STD_ALGORITHMS_PARTITION_POINT_HPP
+
+#include "impl/Kokkos_PartitionPoint.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType, class UnaryPredicate>
+IteratorType partition_point(const ExecutionSpace& ex, IteratorType first,
+                             IteratorType last, UnaryPredicate p) {
+  return Impl::partition_point_impl(
+      "Kokkos::partitioned_point_iterator_api_default", ex, first, last,
+      std::move(p));
+}
+
+template <class ExecutionSpace, class IteratorType, class UnaryPredicate>
+IteratorType partition_point(const std::string& label, const ExecutionSpace& ex,
+                             IteratorType first, IteratorType last,
+                             UnaryPredicate p) {
+  return Impl::partition_point_impl(label, ex, first, last, std::move(p));
+}
+
+template <class ExecutionSpace, class UnaryPredicate, class DataType,
+          class... Properties>
+auto partition_point(const std::string& label, const ExecutionSpace& ex,
+                     const ::Kokkos::View<DataType, Properties...>& v,
+                     UnaryPredicate p) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  return Impl::partition_point_impl(label, ex, begin(v), end(v), std::move(p));
+}
+
+template <class ExecutionSpace, class UnaryPredicate, class DataType,
+          class... Properties>
+auto partition_point(const ExecutionSpace& ex,
+                     const ::Kokkos::View<DataType, Properties...>& v,
+                     UnaryPredicate p) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(v);
+  return Impl::partition_point_impl("Kokkos::partition_point_view_api_default",
+                                    ex, begin(v), end(v), std::move(p));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
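
Note that partition_point requires the input range to already be partitioned with respect to the predicate. An illustrative sketch under that precondition (assumes an initialized runtime):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

struct IsOne {
  KOKKOS_FUNCTION bool operator()(int x) const { return x == 1; }
};

void partition_point_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;
  Kokkos::View<int*> v("v", 6);
  Kokkos::deep_copy(v, 0);
  // Make v partitioned w.r.t. IsOne: {1, 1, 1, 0, 0, 0}.
  Kokkos::deep_copy(Kokkos::subview(v, Kokkos::make_pair(0, 3)), 1);
  // Returns an iterator to the first element failing the predicate.
  auto it = KE::partition_point(exec, v, IsOne{});
  auto boundary = it - KE::begin(v);  // == 3
  (void)boundary;
}
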
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Reduce.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Reduce.hpp
new file mode 100644
index 0000000..3cf9153
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REDUCE_HPP
+#define KOKKOS_STD_ALGORITHMS_REDUCE_HPP
+
+#include "impl/Kokkos_Reduce.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+//
+// overload set 1
+//
+template <class ExecutionSpace, class IteratorType>
+typename IteratorType::value_type reduce(const ExecutionSpace& ex,
+                                         IteratorType first,
+                                         IteratorType last) {
+  return Impl::reduce_default_functors_impl(
+      "Kokkos::reduce_default_functors_iterator_api", ex, first, last,
+      typename IteratorType::value_type());
+}
+
+template <class ExecutionSpace, class IteratorType>
+typename IteratorType::value_type reduce(const std::string& label,
+                                         const ExecutionSpace& ex,
+                                         IteratorType first,
+                                         IteratorType last) {
+  return Impl::reduce_default_functors_impl(
+      label, ex, first, last, typename IteratorType::value_type());
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto reduce(const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& view) {
+  namespace KE = ::Kokkos::Experimental;
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  using view_type  = ::Kokkos::View<DataType, Properties...>;
+  using value_type = typename view_type::value_type;
+
+  return Impl::reduce_default_functors_impl(
+      "Kokkos::reduce_default_functors_view_api", ex, KE::cbegin(view),
+      KE::cend(view), value_type());
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto reduce(const std::string& label, const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& view) {
+  namespace KE = ::Kokkos::Experimental;
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  using view_type  = ::Kokkos::View<DataType, Properties...>;
+  using value_type = typename view_type::value_type;
+
+  return Impl::reduce_default_functors_impl(label, ex, KE::cbegin(view),
+                                            KE::cend(view), value_type());
+}
+
+//
+// overload set2:
+//
+template <class ExecutionSpace, class IteratorType, class ValueType>
+ValueType reduce(const ExecutionSpace& ex, IteratorType first,
+                 IteratorType last, ValueType init_reduction_value) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  return Impl::reduce_default_functors_impl(
+      "Kokkos::reduce_default_functors_iterator_api", ex, first, last,
+      init_reduction_value);
+}
+
+template <class ExecutionSpace, class IteratorType, class ValueType>
+ValueType reduce(const std::string& label, const ExecutionSpace& ex,
+                 IteratorType first, IteratorType last,
+                 ValueType init_reduction_value) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  return Impl::reduce_default_functors_impl(label, ex, first, last,
+                                            init_reduction_value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class ValueType>
+ValueType reduce(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& view,
+                 ValueType init_reduction_value) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  namespace KE = ::Kokkos::Experimental;
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  return Impl::reduce_default_functors_impl(
+      "Kokkos::reduce_default_functors_view_api", ex, KE::cbegin(view),
+      KE::cend(view), init_reduction_value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class ValueType>
+ValueType reduce(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& view,
+                 ValueType init_reduction_value) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  namespace KE = ::Kokkos::Experimental;
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  return Impl::reduce_default_functors_impl(
+      label, ex, KE::cbegin(view), KE::cend(view), init_reduction_value);
+}
+
+//
+// overload set 3
+//
+template <class ExecutionSpace, class IteratorType, class ValueType,
+          class BinaryOp>
+ValueType reduce(const ExecutionSpace& ex, IteratorType first,
+                 IteratorType last, ValueType init_reduction_value,
+                 BinaryOp joiner) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  return Impl::reduce_custom_functors_impl(
+      "Kokkos::reduce_default_functors_iterator_api", ex, first, last,
+      init_reduction_value, joiner);
+}
+
+template <class ExecutionSpace, class IteratorType, class ValueType,
+          class BinaryOp>
+ValueType reduce(const std::string& label, const ExecutionSpace& ex,
+                 IteratorType first, IteratorType last,
+                 ValueType init_reduction_value, BinaryOp joiner) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  return Impl::reduce_custom_functors_impl(label, ex, first, last,
+                                           init_reduction_value, joiner);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class ValueType, class BinaryOp>
+ValueType reduce(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& view,
+                 ValueType init_reduction_value, BinaryOp joiner) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  namespace KE = ::Kokkos::Experimental;
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  return Impl::reduce_custom_functors_impl(
+      "Kokkos::reduce_custom_functors_view_api", ex, KE::cbegin(view),
+      KE::cend(view), init_reduction_value, joiner);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class ValueType, class BinaryOp>
+ValueType reduce(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& view,
+                 ValueType init_reduction_value, BinaryOp joiner) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  namespace KE = ::Kokkos::Experimental;
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  return Impl::reduce_custom_functors_impl(label, ex, KE::cbegin(view),
+                                           KE::cend(view), init_reduction_value,
+                                           joiner);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
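
The three overload sets above can be exercised as follows; a minimal sketch with illustrative names and values (assumes an initialized runtime and a device-callable joiner for overload set 3):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

struct MaxJoiner {
  KOKKOS_FUNCTION int operator()(int a, int b) const { return a > b ? a : b; }
};

void reduce_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;
  Kokkos::View<int*> v("v", 10);
  Kokkos::deep_copy(v, 2);
  int sum  = KE::reduce(exec, v);                  // set 1: plain sum, == 20
  int sum5 = KE::reduce(exec, v, 5);               // set 2: with init,  == 25
  int vmax = KE::reduce(exec, v, 0, MaxJoiner{});  // set 3: custom join, == 2
  (void)sum; (void)sum5; (void)vmax;
}
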
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Remove.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Remove.hpp
new file mode 100644
index 0000000..d8d7c99
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_HPP
+#define KOKKOS_STD_ALGORITHMS_REMOVE_HPP
+
+#include "impl/Kokkos_RemoveAllVariants.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class Iterator, class ValueType>
+Iterator remove(const ExecutionSpace& ex, Iterator first, Iterator last,
+                const ValueType& value) {
+  return Impl::remove_impl("Kokkos::remove_iterator_api_default", ex, first,
+                           last, value);
+}
+
+template <class ExecutionSpace, class Iterator, class ValueType>
+Iterator remove(const std::string& label, const ExecutionSpace& ex,
+                Iterator first, Iterator last, const ValueType& value) {
+  return Impl::remove_impl(label, ex, first, last, value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class ValueType>
+auto remove(const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& view,
+            const ValueType& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return Impl::remove_impl("Kokkos::remove_iterator_api_default", ex,
+                           ::Kokkos::Experimental::begin(view),
+                           ::Kokkos::Experimental::end(view), value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class ValueType>
+auto remove(const std::string& label, const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& view,
+            const ValueType& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return Impl::remove_impl(label, ex, ::Kokkos::Experimental::begin(view),
+                           ::Kokkos::Experimental::end(view), value);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
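
As with std::remove, the view extent is unchanged; kept elements are compacted to the front. An illustrative sketch (assumes an initialized runtime):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void remove_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;
  Kokkos::View<int*> v("v", 6);
  Kokkos::deep_copy(v, 1);
  Kokkos::deep_copy(Kokkos::subview(v, Kokkos::make_pair(0, 2)), 0);  // {0,0,1,1,1,1}
  // Keeps elements != 0 at the front; elements past the returned
  // iterator are left in an unspecified state.
  auto new_end = KE::remove(exec, v, 0);
  auto new_size = new_end - KE::begin(v);  // == 4
  (void)new_size;
}
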
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_RemoveCopy.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_RemoveCopy.hpp
new file mode 100644
index 0000000..7d5c163
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_REMOVE_COPY_HPP
+
+#include "impl/Kokkos_RemoveAllVariants.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class ValueType>
+OutputIterator remove_copy(const ExecutionSpace& ex, InputIterator first_from,
+                           InputIterator last_from, OutputIterator first_dest,
+                           const ValueType& value) {
+  return Impl::remove_copy_impl("Kokkos::remove_copy_iterator_api_default", ex,
+                                first_from, last_from, first_dest, value);
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class ValueType>
+OutputIterator remove_copy(const std::string& label, const ExecutionSpace& ex,
+                           InputIterator first_from, InputIterator last_from,
+                           OutputIterator first_dest, const ValueType& value) {
+  return Impl::remove_copy_impl(label, ex, first_from, last_from, first_dest,
+                                value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType>
+auto remove_copy(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                 const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                 const ValueType& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+  return Impl::remove_copy_impl("Kokkos::remove_copy_iterator_api_default", ex,
+                                ::Kokkos::Experimental::cbegin(view_from),
+                                ::Kokkos::Experimental::cend(view_from),
+                                ::Kokkos::Experimental::begin(view_dest),
+                                value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType>
+auto remove_copy(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                 const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                 const ValueType& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+  return Impl::remove_copy_impl(
+      label, ex, ::Kokkos::Experimental::cbegin(view_from),
+      ::Kokkos::Experimental::cend(view_from),
+      ::Kokkos::Experimental::begin(view_dest), value);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
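
A sketch of the view overload above (illustrative names and values; assumes an initialized runtime):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void remove_copy_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;
  Kokkos::View<int*> from("from", 6);
  Kokkos::View<int*> dest("dest", 6);
  Kokkos::deep_copy(from, 1);
  Kokkos::deep_copy(Kokkos::subview(from, Kokkos::make_pair(0, 2)), 0);
  // Copies every element not equal to 0 into dest, preserving order;
  // returns an iterator past the last element written.
  auto last = KE::remove_copy(exec, from, dest, 0);
  auto n_copied = last - KE::begin(dest);  // == 4
  (void)n_copied;
}
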
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_RemoveCopyIf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_RemoveCopyIf.hpp
new file mode 100644
index 0000000..8a9a3e4
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_COPY_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_REMOVE_COPY_IF_HPP
+
+#include "impl/Kokkos_RemoveAllVariants.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class UnaryPredicate>
+OutputIterator remove_copy_if(const ExecutionSpace& ex,
+                              InputIterator first_from, InputIterator last_from,
+                              OutputIterator first_dest,
+                              const UnaryPredicate& pred) {
+  return Impl::remove_copy_if_impl(
+      "Kokkos::remove_copy_if_iterator_api_default", ex, first_from, last_from,
+      first_dest, pred);
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class UnaryPredicate>
+OutputIterator remove_copy_if(const std::string& label,
+                              const ExecutionSpace& ex,
+                              InputIterator first_from, InputIterator last_from,
+                              OutputIterator first_dest,
+                              const UnaryPredicate& pred) {
+  return Impl::remove_copy_if_impl(label, ex, first_from, last_from, first_dest,
+                                   pred);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class UnaryPredicate>
+auto remove_copy_if(const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                    const UnaryPredicate& pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+  return Impl::remove_copy_if_impl(
+      "Kokkos::remove_copy_if_iterator_api_default", ex,
+      ::Kokkos::Experimental::cbegin(view_from),
+      ::Kokkos::Experimental::cend(view_from),
+      ::Kokkos::Experimental::begin(view_dest), pred);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class UnaryPredicate>
+auto remove_copy_if(const std::string& label, const ExecutionSpace& ex,
+                    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                    const UnaryPredicate& pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+
+  return Impl::remove_copy_if_impl(
+      label, ex, ::Kokkos::Experimental::cbegin(view_from),
+      ::Kokkos::Experimental::cend(view_from),
+      ::Kokkos::Experimental::begin(view_dest), pred);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
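
Same shape as remove_copy, but driven by a device-callable predicate; an illustrative sketch (the functor, names, and values are assumptions; Kokkos is presumed initialized):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

struct IsOdd {
  KOKKOS_FUNCTION bool operator()(int x) const { return x % 2 != 0; }
};

void remove_copy_if_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;
  Kokkos::View<int*> from("from", 8);
  Kokkos::View<int*> dest("dest", 8);
  Kokkos::deep_copy(from, 4);  // every element is even, so all are kept
  // Copies the elements for which the predicate is false into dest.
  auto last = KE::remove_copy_if(exec, from, dest, IsOdd{});
  auto n_copied = last - KE::begin(dest);  // == 8
  (void)n_copied;
}
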
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_RemoveIf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_RemoveIf.hpp
new file mode 100644
index 0000000..e4171ca
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_REMOVE_IF_HPP
+
+#include "impl/Kokkos_RemoveAllVariants.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class Iterator, class UnaryPredicate>
+Iterator remove_if(const ExecutionSpace& ex, Iterator first, Iterator last,
+                   UnaryPredicate pred) {
+  return Impl::remove_if_impl("Kokkos::remove_if_iterator_api_default", ex,
+                              first, last, pred);
+}
+
+template <class ExecutionSpace, class Iterator, class UnaryPredicate>
+Iterator remove_if(const std::string& label, const ExecutionSpace& ex,
+                   Iterator first, Iterator last, UnaryPredicate pred) {
+  return Impl::remove_if_impl(label, ex, first, last, pred);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class UnaryPredicate>
+auto remove_if(const ExecutionSpace& ex,
+               const ::Kokkos::View<DataType, Properties...>& view,
+               UnaryPredicate pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  return Impl::remove_if_impl("Kokkos::remove_if_iterator_api_default", ex,
+                              ::Kokkos::Experimental::begin(view),
+                              ::Kokkos::Experimental::end(view), pred);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class UnaryPredicate>
+auto remove_if(const std::string& label, const ExecutionSpace& ex,
+               const ::Kokkos::View<DataType, Properties...>& view,
+               UnaryPredicate pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return Impl::remove_if_impl(label, ex, ::Kokkos::Experimental::begin(view),
+                              ::Kokkos::Experimental::end(view), pred);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
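
The in-place counterpart, sketched under the same assumptions (illustrative functor and values; initialized runtime):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

struct IsOdd {
  KOKKOS_FUNCTION bool operator()(int x) const { return x % 2 != 0; }
};

void remove_if_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;
  Kokkos::View<int*> v("v", 8);
  Kokkos::deep_copy(v, 3);  // every element is odd
  // All elements satisfy the predicate, so the logical size becomes 0;
  // the view extent itself is unchanged.
  auto new_end = KE::remove_if(exec, v, IsOdd{});
  auto n_kept = new_end - KE::begin(v);  // == 0
  (void)n_kept;
}
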
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Replace.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Replace.hpp
new file mode 100644
index 0000000..10ca46a
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_HPP
+
+#include "impl/Kokkos_Replace.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class Iterator, class ValueType>
+void replace(const ExecutionSpace& ex, Iterator first, Iterator last,
+             const ValueType& old_value, const ValueType& new_value) {
+  return Impl::replace_impl("Kokkos::replace_iterator_api", ex, first, last,
+                            old_value, new_value);
+}
+
+template <class ExecutionSpace, class Iterator, class ValueType>
+void replace(const std::string& label, const ExecutionSpace& ex, Iterator first,
+             Iterator last, const ValueType& old_value,
+             const ValueType& new_value) {
+  return Impl::replace_impl(label, ex, first, last, old_value, new_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class ValueType>
+void replace(const ExecutionSpace& ex,
+             const ::Kokkos::View<DataType1, Properties1...>& view,
+             const ValueType& old_value, const ValueType& new_value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::replace_impl("Kokkos::replace_view_api", ex, KE::begin(view),
+                            KE::end(view), old_value, new_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class ValueType>
+void replace(const std::string& label, const ExecutionSpace& ex,
+             const ::Kokkos::View<DataType1, Properties1...>& view,
+             const ValueType& old_value, const ValueType& new_value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::replace_impl(label, ex, KE::begin(view), KE::end(view),
+                            old_value, new_value);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
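
A minimal sketch of the in-place view overload above (illustrative names and values; assumes an initialized runtime):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

void replace_example() {
  namespace KE = Kokkos::Experimental;
  Kokkos::DefaultExecutionSpace exec;
  Kokkos::View<int*> v("v", 10);
  Kokkos::deep_copy(v, 0);
  // Every element equal to 0 becomes 42; the old and new values must
  // have the same type.
  KE::replace(exec, v, 0, 42);
}
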
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ReplaceCopy.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ReplaceCopy.hpp
new file mode 100644
index 0000000..f5136eb
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_COPY_HPP
+
+#include "impl/Kokkos_ReplaceCopy.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class ValueType>
+OutputIterator replace_copy(const ExecutionSpace& ex, InputIterator first_from,
+                            InputIterator last_from, OutputIterator first_dest,
+                            const ValueType& old_value,
+                            const ValueType& new_value) {
+  return Impl::replace_copy_impl("Kokkos::replace_copy_iterator_api", ex,
+                                 first_from, last_from, first_dest, old_value,
+                                 new_value);
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class ValueType>
+OutputIterator replace_copy(const std::string& label, const ExecutionSpace& ex,
+                            InputIterator first_from, InputIterator last_from,
+                            OutputIterator first_dest,
+                            const ValueType& old_value,
+                            const ValueType& new_value) {
+  return Impl::replace_copy_impl(label, ex, first_from, last_from, first_dest,
+                                 old_value, new_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType>
+auto replace_copy(const ExecutionSpace& ex,
+                  const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                  const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                  const ValueType& old_value, const ValueType& new_value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::replace_copy_impl("Kokkos::replace_copy_view_api", ex,
+                                 KE::cbegin(view_from), KE::cend(view_from),
+                                 KE::begin(view_dest), old_value, new_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType>
+auto replace_copy(const std::string& label, const ExecutionSpace& ex,
+                  const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                  const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                  const ValueType& old_value, const ValueType& new_value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::replace_copy_impl(label, ex, KE::cbegin(view_from),
+                                 KE::cend(view_from), KE::begin(view_dest),
+                                 old_value, new_value);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
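replace_copy() reads one range and writes another. A sketch of the View overload, assuming an initialized Kokkos runtime as in the replace() example above (setup elided):

    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> from("from", 8), dest("dest", 8);
    Kokkos::deep_copy(from, 1);
    // Copy 'from' into 'dest', substituting 2 wherever 'from' holds 1;
    // returns an iterator past the last element written to 'dest'.
    auto it = KE::replace_copy(ex, from, dest, 1, 2);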
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ReplaceCopyIf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ReplaceCopyIf.hpp
new file mode 100644 (file)
index 0000000..a3f3fe6
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IF_HPP
+
+#include "impl/Kokkos_ReplaceCopyIf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class PredicateType, class ValueType>
+OutputIterator replace_copy_if(const ExecutionSpace& ex,
+                               InputIterator first_from,
+                               InputIterator last_from,
+                               OutputIterator first_dest, PredicateType pred,
+                               const ValueType& new_value) {
+  return Impl::replace_copy_if_impl("Kokkos::replace_copy_if_iterator_api", ex,
+                                    first_from, last_from, first_dest, pred,
+                                    new_value);
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class PredicateType, class ValueType>
+OutputIterator replace_copy_if(const std::string& label,
+                               const ExecutionSpace& ex,
+                               InputIterator first_from,
+                               InputIterator last_from,
+                               OutputIterator first_dest, PredicateType pred,
+                               const ValueType& new_value) {
+  return Impl::replace_copy_if_impl(label, ex, first_from, last_from,
+                                    first_dest, pred, new_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class PredicateType,
+          class ValueType>
+auto replace_copy_if(const ExecutionSpace& ex,
+                     const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                     const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                     PredicateType pred, const ValueType& new_value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::replace_copy_if_impl("Kokkos::replace_copy_if_view_api", ex,
+                                    KE::cbegin(view_from), KE::cend(view_from),
+                                    KE::begin(view_dest), pred, new_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class PredicateType,
+          class ValueType>
+auto replace_copy_if(const std::string& label, const ExecutionSpace& ex,
+                     const ::Kokkos::View<DataType1, Properties1...>& view_from,
+                     const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+                     PredicateType pred, const ValueType& new_value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::replace_copy_if_impl(label, ex, KE::cbegin(view_from),
+                                    KE::cend(view_from), KE::begin(view_dest),
+                                    pred, new_value);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
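replace_copy_if() takes a predicate instead of an old value. The predicate is invoked inside a parallel kernel, so for device backends it must be device-callable, hence KOKKOS_LAMBDA in this illustrative sketch (runtime setup elided as before):

    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> from("from", 8), dest("dest", 8);
    // Write -1 to 'dest' wherever the predicate holds, else copy unchanged.
    KE::replace_copy_if(
        ex, from, dest, KOKKOS_LAMBDA(int v) { return v % 2 == 0; }, -1);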
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ReplaceIf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ReplaceIf.hpp
new file mode 100644 (file)
index 0000000..bdb59f2
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_IF_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_IF_HPP
+
+#include "impl/Kokkos_ReplaceIf.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class Predicate,
+          class ValueType>
+void replace_if(const ExecutionSpace& ex, InputIterator first,
+                InputIterator last, Predicate pred,
+                const ValueType& new_value) {
+  return Impl::replace_if_impl("Kokkos::replace_if_iterator_api", ex, first,
+                               last, pred, new_value);
+}
+
+template <class ExecutionSpace, class InputIterator, class Predicate,
+          class ValueType>
+void replace_if(const std::string& label, const ExecutionSpace& ex,
+                InputIterator first, InputIterator last, Predicate pred,
+                const ValueType& new_value) {
+  return Impl::replace_if_impl(label, ex, first, last, pred, new_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class Predicate, class ValueType>
+void replace_if(const ExecutionSpace& ex,
+                const ::Kokkos::View<DataType1, Properties1...>& view,
+                Predicate pred, const ValueType& new_value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::replace_if_impl("Kokkos::replace_if_view_api", ex,
+                               KE::begin(view), KE::end(view), pred, new_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class Predicate, class ValueType>
+void replace_if(const std::string& label, const ExecutionSpace& ex,
+                const ::Kokkos::View<DataType1, Properties1...>& view,
+                Predicate pred, const ValueType& new_value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::replace_if_impl(label, ex, KE::begin(view), KE::end(view), pred,
+                               new_value);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
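replace_if() is the in-place variant; a sketch under the same assumptions:

    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<double*> v("v", 100);
    // Clamp all negative entries to zero, in place.
    KE::replace_if(ex, v, KOKKOS_LAMBDA(double x) { return x < 0.0; }, 0.0);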
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Reverse.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Reverse.hpp
new file mode 100644 (file)
index 0000000..4848b20
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REVERSE_HPP
+#define KOKKOS_STD_ALGORITHMS_REVERSE_HPP
+
+#include "impl/Kokkos_Reverse.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator>
+void reverse(const ExecutionSpace& ex, InputIterator first,
+             InputIterator last) {
+  return Impl::reverse_impl("Kokkos::reverse_iterator_api_default", ex, first,
+                            last);
+}
+
+template <class ExecutionSpace, class InputIterator>
+void reverse(const std::string& label, const ExecutionSpace& ex,
+             InputIterator first, InputIterator last) {
+  return Impl::reverse_impl(label, ex, first, last);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+void reverse(const ExecutionSpace& ex,
+             const ::Kokkos::View<DataType, Properties...>& view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::reverse_impl("Kokkos::reverse_view_api_default", ex,
+                            KE::begin(view), KE::end(view));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+void reverse(const std::string& label, const ExecutionSpace& ex,
+             const ::Kokkos::View<DataType, Properties...>& view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::reverse_impl(label, ex, KE::begin(view), KE::end(view));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
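reverse() works in place on an iterator range or a rank-1 View; an illustrative sketch (initialized runtime assumed):

    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> v("v", 10);
    KE::reverse(ex, v);                                       // View overload
    KE::reverse("my_reverse", ex, KE::begin(v), KE::end(v));  // labeled overload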
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ReverseCopy.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ReverseCopy.hpp
new file mode 100644 (file)
index 0000000..bb4462b
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REVERSE_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_REVERSE_COPY_HPP
+
+#include "impl/Kokkos_ReverseCopy.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator reverse_copy(const ExecutionSpace& ex, InputIterator first,
+                            InputIterator last, OutputIterator d_first) {
+  return Impl::reverse_copy_impl("Kokkos::reverse_copy_iterator_api_default",
+                                 ex, first, last, d_first);
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator reverse_copy(const std::string& label, const ExecutionSpace& ex,
+                            InputIterator first, InputIterator last,
+                            OutputIterator d_first) {
+  return Impl::reverse_copy_impl(label, ex, first, last, d_first);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto reverse_copy(const ExecutionSpace& ex,
+                  const ::Kokkos::View<DataType1, Properties1...>& source,
+                  ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::reverse_copy_impl("Kokkos::reverse_copy_view_api_default", ex,
+                                 cbegin(source), cend(source), begin(dest));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto reverse_copy(const std::string& label, const ExecutionSpace& ex,
+                  const ::Kokkos::View<DataType1, Properties1...>& source,
+                  ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::reverse_copy_impl(label, ex, cbegin(source), cend(source),
+                                 begin(dest));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
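reverse_copy() writes the reversed sequence into a destination View, which the overloads above take by non-const reference, so it must be an lvalue. Sketch:

    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> src("src", 10), dst("dst", 10);
    // dst(i) = src(9 - i); returns an iterator past the last element written.
    auto it = KE::reverse_copy(ex, src, dst);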
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Rotate.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Rotate.hpp
new file mode 100644 (file)
index 0000000..3997581
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_ROTATE_HPP
+#define KOKKOS_STD_ALGORITHMS_ROTATE_HPP
+
+#include "impl/Kokkos_Rotate.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType rotate(const ExecutionSpace& ex, IteratorType first,
+                    IteratorType n_first, IteratorType last) {
+  return Impl::rotate_impl("Kokkos::rotate_iterator_api_default", ex, first,
+                           n_first, last);
+}
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType rotate(const std::string& label, const ExecutionSpace& ex,
+                    IteratorType first, IteratorType n_first,
+                    IteratorType last) {
+  return Impl::rotate_impl(label, ex, first, n_first, last);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto rotate(const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& view,
+            std::size_t n_location) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return Impl::rotate_impl("Kokkos::rotate_view_api_default", ex, begin(view),
+                           begin(view) + n_location, end(view));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto rotate(const std::string& label, const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& view,
+            std::size_t n_location) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return Impl::rotate_impl(label, ex, begin(view), begin(view) + n_location,
+                           end(view));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
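In the View overloads of rotate(), n_location is the index that becomes the first element, mirroring std::rotate's n_first (note begin(view) + n_location above). Sketch:

    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> v("v", 10);
    // Left-rotate so that the old v(3) becomes v(0).
    auto it = KE::rotate(ex, v, 3);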
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_RotateCopy.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_RotateCopy.hpp
new file mode 100644 (file)
index 0000000..f98686a
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_ROTATE_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_ROTATE_COPY_HPP
+
+#include "impl/Kokkos_RotateCopy.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator rotate_copy(const ExecutionSpace& ex, InputIterator first,
+                           InputIterator n_first, InputIterator last,
+                           OutputIterator d_first) {
+  return Impl::rotate_copy_impl("Kokkos::rotate_copy_iterator_api_default", ex,
+                                first, n_first, last, d_first);
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator rotate_copy(const std::string& label, const ExecutionSpace& ex,
+                           InputIterator first, InputIterator n_first,
+                           InputIterator last, OutputIterator d_first) {
+  return Impl::rotate_copy_impl(label, ex, first, n_first, last, d_first);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto rotate_copy(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType1, Properties1...>& source,
+                 std::size_t n_location,
+                 const ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::rotate_copy_impl("Kokkos::rotate_copy_view_api_default", ex,
+                                cbegin(source), cbegin(source) + n_location,
+                                cend(source), begin(dest));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto rotate_copy(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType1, Properties1...>& source,
+                 std::size_t n_location,
+                 const ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::rotate_copy_impl(label, ex, cbegin(source),
+                                cbegin(source) + n_location, cend(source),
+                                begin(dest));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
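rotate_copy() leaves the source untouched (note the cbegin/cend above) and writes the rotated sequence to dest. Sketch:

    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> src("src", 10), dst("dst", 10);
    // dst = {src(3), ..., src(9), src(0), src(1), src(2)}.
    auto it = KE::rotate_copy(ex, src, 3, dst);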
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Search.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Search.hpp
new file mode 100644 (file)
index 0000000..ce656da
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_SEARCH_HPP
+#define KOKKOS_STD_ALGORITHMS_SEARCH_HPP
+
+#include "impl/Kokkos_Search.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// overload set 1: no binary predicate passed
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType1 search(const ExecutionSpace& ex, IteratorType1 first,
+                     IteratorType1 last, IteratorType2 s_first,
+                     IteratorType2 s_last) {
+  return Impl::search_impl("Kokkos::search_iterator_api_default", ex, first,
+                           last, s_first, s_last);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType1 search(const std::string& label, const ExecutionSpace& ex,
+                     IteratorType1 first, IteratorType1 last,
+                     IteratorType2 s_first, IteratorType2 s_last) {
+  return Impl::search_impl(label, ex, first, last, s_first, s_last);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto search(const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType1, Properties1...>& view,
+            const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::search_impl("Kokkos::search_view_api_default", ex,
+                           KE::begin(view), KE::end(view), KE::begin(s_view),
+                           KE::end(s_view));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto search(const std::string& label, const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType1, Properties1...>& view,
+            const ::Kokkos::View<DataType2, Properties2...>& s_view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::search_impl(label, ex, KE::begin(view), KE::end(view),
+                           KE::begin(s_view), KE::end(s_view));
+}
+
+// overload set 2: binary predicate passed
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+IteratorType1 search(const ExecutionSpace& ex, IteratorType1 first,
+                     IteratorType1 last, IteratorType2 s_first,
+                     IteratorType2 s_last, const BinaryPredicateType& pred) {
+  return Impl::search_impl("Kokkos::search_iterator_api_default", ex, first,
+                           last, s_first, s_last, pred);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+IteratorType1 search(const std::string& label, const ExecutionSpace& ex,
+                     IteratorType1 first, IteratorType1 last,
+                     IteratorType2 s_first, IteratorType2 s_last,
+                     const BinaryPredicateType& pred) {
+  return Impl::search_impl(label, ex, first, last, s_first, s_last, pred);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryPredicateType>
+auto search(const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType1, Properties1...>& view,
+            const ::Kokkos::View<DataType2, Properties2...>& s_view,
+            const BinaryPredicateType& pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::search_impl("Kokkos::search_view_api_default", ex,
+                           KE::begin(view), KE::end(view), KE::begin(s_view),
+                           KE::end(s_view), pred);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryPredicateType>
+auto search(const std::string& label, const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType1, Properties1...>& view,
+            const ::Kokkos::View<DataType2, Properties2...>& s_view,
+            const BinaryPredicateType& pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(s_view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::search_impl(label, ex, KE::begin(view), KE::end(view),
+                           KE::begin(s_view), KE::end(s_view), pred);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
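search() locates the first occurrence of one sequence inside another, optionally under a custom equality predicate (which, as with the other algorithms, must be device-callable on GPU backends). Illustrative sketch:

    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> hay("hay", 100), needle("needle", 4);
    // Iterator to the first match, or the end of 'hay' if none exists.
    auto it1 = KE::search(ex, hay, needle);
    // Same, with an explicit equality predicate:
    auto it2 = KE::search(ex, hay, needle,
                          KOKKOS_LAMBDA(int a, int b) { return a == b; });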
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_SearchN.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_SearchN.hpp
new file mode 100644 (file)
index 0000000..854d911
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_SEARCH_N_HPP
+#define KOKKOS_STD_ALGORITHMS_SEARCH_N_HPP
+
+#include "impl/Kokkos_SearchN.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// overload set 1: no binary predicate passed
+template <class ExecutionSpace, class IteratorType, class SizeType,
+          class ValueType>
+IteratorType search_n(const ExecutionSpace& ex, IteratorType first,
+                      IteratorType last, SizeType count,
+                      const ValueType& value) {
+  return Impl::search_n_impl("Kokkos::search_n_iterator_api_default", ex, first,
+                             last, count, value);
+}
+
+template <class ExecutionSpace, class IteratorType, class SizeType,
+          class ValueType>
+IteratorType search_n(const std::string& label, const ExecutionSpace& ex,
+                      IteratorType first, IteratorType last, SizeType count,
+                      const ValueType& value) {
+  return Impl::search_n_impl(label, ex, first, last, count, value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class SizeType, class ValueType>
+auto search_n(const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType, Properties...>& view,
+              SizeType count, const ValueType& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::search_n_impl("Kokkos::search_n_view_api_default", ex,
+                             KE::begin(view), KE::end(view), count, value);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class SizeType, class ValueType>
+auto search_n(const std::string& label, const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType, Properties...>& view,
+              SizeType count, const ValueType& value) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::search_n_impl(label, ex, KE::begin(view), KE::end(view), count,
+                             value);
+}
+
+// overload set 2: binary predicate passed
+template <class ExecutionSpace, class IteratorType, class SizeType,
+          class ValueType, class BinaryPredicateType>
+IteratorType search_n(const ExecutionSpace& ex, IteratorType first,
+                      IteratorType last, SizeType count, const ValueType& value,
+                      const BinaryPredicateType& pred) {
+  return Impl::search_n_impl("Kokkos::search_n_iterator_api_default", ex, first,
+                             last, count, value, pred);
+}
+
+template <class ExecutionSpace, class IteratorType, class SizeType,
+          class ValueType, class BinaryPredicateType>
+IteratorType search_n(const std::string& label, const ExecutionSpace& ex,
+                      IteratorType first, IteratorType last, SizeType count,
+                      const ValueType& value, const BinaryPredicateType& pred) {
+  return Impl::search_n_impl(label, ex, first, last, count, value, pred);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class SizeType, class ValueType, class BinaryPredicateType>
+auto search_n(const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType, Properties...>& view,
+              SizeType count, const ValueType& value,
+              const BinaryPredicateType& pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::search_n_impl("Kokkos::search_n_view_api_default", ex,
+                             KE::begin(view), KE::end(view), count, value,
+                             pred);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class SizeType, class ValueType, class BinaryPredicateType>
+auto search_n(const std::string& label, const ExecutionSpace& ex,
+              const ::Kokkos::View<DataType, Properties...>& view,
+              SizeType count, const ValueType& value,
+              const BinaryPredicateType& pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::search_n_impl(label, ex, KE::begin(view), KE::end(view), count,
+                             value, pred);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
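search_n() looks for a run of count consecutive elements equal to value (or, in overload set 2, related to value by the predicate). Sketch:

    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> v("v", 100);
    // First position where 5 consecutive entries equal 7.
    auto it = KE::search_n(ex, v, 5, 7);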
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ShiftLeft.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ShiftLeft.hpp
new file mode 100644 (file)
index 0000000..cee111a
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_SHIFT_LEFT_HPP
+#define KOKKOS_STD_ALGORITHMS_SHIFT_LEFT_HPP
+
+#include "impl/Kokkos_ShiftLeft.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType shift_left(const ExecutionSpace& ex, IteratorType first,
+                        IteratorType last,
+                        typename IteratorType::difference_type n) {
+  return Impl::shift_left_impl("Kokkos::shift_left_iterator_api_default", ex,
+                               first, last, n);
+}
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType shift_left(const std::string& label, const ExecutionSpace& ex,
+                        IteratorType first, IteratorType last,
+                        typename IteratorType::difference_type n) {
+  return Impl::shift_left_impl(label, ex, first, last, n);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto shift_left(const ExecutionSpace& ex,
+                const ::Kokkos::View<DataType, Properties...>& view,
+                typename decltype(begin(view))::difference_type n) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return Impl::shift_left_impl("Kokkos::shift_left_view_api_default", ex,
+                               begin(view), end(view), n);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto shift_left(const std::string& label, const ExecutionSpace& ex,
+                const ::Kokkos::View<DataType, Properties...>& view,
+                typename decltype(begin(view))::difference_type n) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return Impl::shift_left_impl(label, ex, begin(view), end(view), n);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
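shift_left() moves elements n positions toward the front; as with std::shift_left, the trailing n entries should be treated as unspecified afterwards. Sketch:

    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> v("v", 10);
    // v(i) = old v(i + 3) for i < 7; returns the new logical end.
    auto new_end = KE::shift_left(ex, v, 3);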
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ShiftRight.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_ShiftRight.hpp
new file mode 100644 (file)
index 0000000..f104d2b
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_SHIFT_RIGHT_HPP
+#define KOKKOS_STD_ALGORITHMS_SHIFT_RIGHT_HPP
+
+#include "impl/Kokkos_ShiftRight.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType shift_right(const ExecutionSpace& ex, IteratorType first,
+                         IteratorType last,
+                         typename IteratorType::difference_type n) {
+  return Impl::shift_right_impl("Kokkos::shift_right_iterator_api_default", ex,
+                                first, last, n);
+}
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType shift_right(const std::string& label, const ExecutionSpace& ex,
+                         IteratorType first, IteratorType last,
+                         typename IteratorType::difference_type n) {
+  return Impl::shift_right_impl(label, ex, first, last, n);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto shift_right(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& view,
+                 typename decltype(begin(view))::difference_type n) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return Impl::shift_right_impl("Kokkos::shift_right_view_api_default", ex,
+                                begin(view), end(view), n);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto shift_right(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType, Properties...>& view,
+                 typename decltype(begin(view))::difference_type n) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return Impl::shift_right_impl(label, ex, begin(view), end(view), n);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
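shift_right() is the mirror image, shifting toward the back, with the first n entries left unspecified in the std::shift_right sense. Sketch:

    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> v("v", 10);
    // v(i) = old v(i - 3) for i >= 3; returns the new logical begin.
    auto new_first = KE::shift_right(ex, v, 3);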
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Swap.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Swap.hpp
new file mode 100644 (file)
index 0000000..9006aa9
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_SWAP_HPP
+#define KOKKOS_STD_ALGORITHMS_SWAP_HPP
+
+#include <Kokkos_Core.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+// swap
+template <class T>
+KOKKOS_INLINE_FUNCTION void swap(T& a, T& b) noexcept {
+  static_assert(
+      std::is_move_assignable<T>::value && std::is_move_constructible<T>::value,
+      "Kokkos::Experimental::swap arguments must be move assignable "
+      "and move constructible");
+
+  T tmp = std::move(a);
+  a     = std::move(b);
+  b     = std::move(tmp);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
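Unlike the range algorithms above, this swap() is a KOKKOS_INLINE_FUNCTION and therefore usable inside kernels. An illustrative sketch that reverses a View with pairwise swaps:

    namespace KE = Kokkos::Experimental;
    Kokkos::View<int*> v("v", 10);
    Kokkos::parallel_for(
        "reverse_by_swap", 5,
        KOKKOS_LAMBDA(const int i) { KE::swap(v(i), v(9 - i)); });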
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_SwapRanges.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_SwapRanges.hpp
new file mode 100644 (file)
index 0000000..2997cda
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_SWAP_RANGES_HPP
+#define KOKKOS_STD_ALGORITHMS_SWAP_RANGES_HPP
+
+#include "impl/Kokkos_SwapRanges.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType2 swap_ranges(const ExecutionSpace& ex, IteratorType1 first1,
+                          IteratorType1 last1, IteratorType2 first2) {
+  return Impl::swap_ranges_impl("Kokkos::swap_ranges_iterator_api_default", ex,
+                                first1, last1, first2);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto swap_ranges(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType1, Properties1...>& source,
+                 ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  assert(source.extent(0) == dest.extent(0));
+  return Impl::swap_ranges_impl("Kokkos::swap_ranges_view_api_default", ex,
+                                begin(source), end(source), begin(dest));
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType2 swap_ranges(const std::string& label, const ExecutionSpace& ex,
+                          IteratorType1 first1, IteratorType1 last1,
+                          IteratorType2 first2) {
+  return Impl::swap_ranges_impl(label, ex, first1, last1, first2);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto swap_ranges(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType1, Properties1...>& source,
+                 ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  assert(source.extent(0) == dest.extent(0));
+  return Impl::swap_ranges_impl(label, ex, begin(source), end(source),
+                                begin(dest));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
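
A minimal sketch of the view-based swap_ranges overload above (execution space, extents, and fill values are illustrative assumptions):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    Kokkos::View<double*> a("a", 10);
    Kokkos::View<double*> b("b", 10);
    Kokkos::deep_copy(a, 1.0);
    Kokkos::deep_copy(b, 2.0);
    // Exchanges a and b element-wise; the overload asserts that the
    // two extents match.
    Kokkos::Experimental::swap_ranges(Kokkos::DefaultExecutionSpace(), a, b);
  }
  Kokkos::finalize();
  return 0;
}
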
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Transform.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Transform.hpp
new file mode 100644 (file)
index 0000000..6dfb83a
--- /dev/null
@@ -0,0 +1,166 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_HPP
+
+#include "impl/Kokkos_Transform.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class UnaryOperation>
+std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
+                      InputIterator, OutputIterator>::value,
+                  OutputIterator>
+transform(const ExecutionSpace& ex, InputIterator first1, InputIterator last1,
+          OutputIterator d_first, UnaryOperation unary_op) {
+  return Impl::transform_impl("Kokkos::transform_iterator_api_default", ex,
+                              first1, last1, d_first, std::move(unary_op));
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class UnaryOperation>
+std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
+                      InputIterator, OutputIterator>::value,
+                  OutputIterator>
+transform(const std::string& label, const ExecutionSpace& ex,
+          InputIterator first1, InputIterator last1, OutputIterator d_first,
+          UnaryOperation unary_op) {
+  return Impl::transform_impl(label, ex, first1, last1, d_first,
+                              std::move(unary_op));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class UnaryOperation>
+auto transform(const ExecutionSpace& ex,
+               const ::Kokkos::View<DataType1, Properties1...>& source,
+               ::Kokkos::View<DataType2, Properties2...>& dest,
+               UnaryOperation unary_op) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::transform_impl("Kokkos::transform_view_api_default", ex,
+                              begin(source), end(source), begin(dest),
+                              std::move(unary_op));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class UnaryOperation>
+auto transform(const std::string& label, const ExecutionSpace& ex,
+               const ::Kokkos::View<DataType1, Properties1...>& source,
+               ::Kokkos::View<DataType2, Properties2...>& dest,
+               UnaryOperation unary_op) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::transform_impl(label, ex, begin(source), end(source),
+                              begin(dest), std::move(unary_op));
+}
+
+template <class ExecutionSpace, class InputIterator1, class InputIterator2,
+          class OutputIterator, class BinaryOperation>
+std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
+                      InputIterator1, InputIterator2, OutputIterator>::value,
+                  OutputIterator>
+transform(const ExecutionSpace& ex, InputIterator1 first1, InputIterator1 last1,
+          InputIterator2 first2, OutputIterator d_first,
+          BinaryOperation binary_op) {
+  return Impl::transform_impl("Kokkos::transform_iterator_api_default", ex,
+                              first1, last1, first2, d_first,
+                              std::move(binary_op));
+}
+
+template <class ExecutionSpace, class InputIterator1, class InputIterator2,
+          class OutputIterator, class BinaryOperation>
+std::enable_if_t< ::Kokkos::Experimental::Impl::are_iterators<
+                      InputIterator1, InputIterator2, OutputIterator>::value,
+                  OutputIterator>
+transform(const std::string& label, const ExecutionSpace& ex,
+          InputIterator1 first1, InputIterator1 last1, InputIterator2 first2,
+          OutputIterator d_first, BinaryOperation binary_op) {
+  return Impl::transform_impl(label, ex, first1, last1, first2, d_first,
+                              std::move(binary_op));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class DataType3,
+          class... Properties3, class BinaryOperation>
+auto transform(const ExecutionSpace& ex,
+               const ::Kokkos::View<DataType1, Properties1...>& source1,
+               const ::Kokkos::View<DataType2, Properties2...>& source2,
+               ::Kokkos::View<DataType3, Properties3...>& dest,
+               BinaryOperation binary_op) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source2);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::transform_impl("Kokkos::transform_view_api_default", ex,
+                              begin(source1), end(source1), begin(source2),
+                              begin(dest), std::move(binary_op));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class DataType3,
+          class... Properties3, class BinaryOperation>
+auto transform(const std::string& label, const ExecutionSpace& ex,
+               const ::Kokkos::View<DataType1, Properties1...>& source1,
+               const ::Kokkos::View<DataType2, Properties2...>& source2,
+               ::Kokkos::View<DataType3, Properties3...>& dest,
+               BinaryOperation binary_op) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source1);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source2);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::transform_impl(label, ex, begin(source1), end(source1),
+                              begin(source2), begin(dest),
+                              std::move(binary_op));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
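
A sketch of the unary, view-based transform overload above (the squaring lambda and sizes are illustrative assumptions):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    Kokkos::View<double*> in("in", 100);
    Kokkos::View<double*> out("out", 100);
    Kokkos::deep_copy(in, 3.0);
    // Unary overload: out(i) = unary_op(in(i)), here the square.
    Kokkos::Experimental::transform(
        Kokkos::DefaultExecutionSpace(), in, out,
        KOKKOS_LAMBDA(double x) { return x * x; });
  }
  Kokkos::finalize();
  return 0;
}
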
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_TransformExclusiveScan.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_TransformExclusiveScan.hpp
new file mode 100644 (file)
index 0000000..d007359
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_EXCLUSIVE_SCAN_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_EXCLUSIVE_SCAN_HPP
+
+#include "impl/Kokkos_TransformExclusiveScan.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class ValueType, class BinaryOpType,
+          class UnaryOpType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+transform_exclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
+                         InputIteratorType last, OutputIteratorType first_dest,
+                         ValueType init_value, BinaryOpType binary_op,
+                         UnaryOpType unary_op) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+  return Impl::transform_exclusive_scan_impl(
+      "Kokkos::transform_exclusive_scan_custom_functors_iterator_api", ex,
+      first, last, first_dest, init_value, binary_op, unary_op);
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class ValueType, class BinaryOpType,
+          class UnaryOpType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+transform_exclusive_scan(const std::string& label, const ExecutionSpace& ex,
+                         InputIteratorType first, InputIteratorType last,
+                         OutputIteratorType first_dest, ValueType init_value,
+                         BinaryOpType binary_op, UnaryOpType unary_op) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+  return Impl::transform_exclusive_scan_impl(label, ex, first, last, first_dest,
+                                             init_value, binary_op, unary_op);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType,
+          class BinaryOpType, class UnaryOpType>
+auto transform_exclusive_scan(
+    const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+    ValueType init_value, BinaryOpType binary_op, UnaryOpType unary_op) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::transform_exclusive_scan_impl(
+      "Kokkos::transform_exclusive_scan_custom_functors_view_api", ex,
+      KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+      init_value, binary_op, unary_op);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType,
+          class BinaryOpType, class UnaryOpType>
+auto transform_exclusive_scan(
+    const std::string& label, const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+    ValueType init_value, BinaryOpType binary_op, UnaryOpType unary_op) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::transform_exclusive_scan_impl(
+      label, ex, KE::cbegin(view_from), KE::cend(view_from),
+      KE::begin(view_dest), init_value, binary_op, unary_op);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
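
A sketch of the view-based transform_exclusive_scan above; the sum/double functors, sizes, and values are illustrative assumptions:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    Kokkos::View<int*> in("in", 5);
    Kokkos::View<int*> out("out", 5);
    Kokkos::deep_copy(in, 1);
    // out(i) joins init with unary_op(in(j)) for all j < i; with these
    // functors this is an exclusive prefix sum of the doubled inputs,
    // giving {0, 2, 4, 6, 8}.
    Kokkos::Experimental::transform_exclusive_scan(
        Kokkos::DefaultExecutionSpace(), in, out, 0,
        KOKKOS_LAMBDA(int a, int b) { return a + b; },
        KOKKOS_LAMBDA(int x) { return 2 * x; });
  }
  Kokkos::finalize();
  return 0;
}
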
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_TransformInclusiveScan.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_TransformInclusiveScan.hpp
new file mode 100644 (file)
index 0000000..088e162
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_INCLUSIVE_SCAN_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_INCLUSIVE_SCAN_HPP
+
+#include "impl/Kokkos_TransformInclusiveScan.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// overload set 1 (no init value)
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOpType, class UnaryOpType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+transform_inclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
+                         InputIteratorType last, OutputIteratorType first_dest,
+                         BinaryOpType binary_op, UnaryOpType unary_op) {
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::transform_inclusive_scan_impl(
+      "Kokkos::transform_inclusive_scan_custom_functors_iterator_api", ex,
+      first, last, first_dest, binary_op, unary_op);
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOpType, class UnaryOpType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+transform_inclusive_scan(const std::string& label, const ExecutionSpace& ex,
+                         InputIteratorType first, InputIteratorType last,
+                         OutputIteratorType first_dest, BinaryOpType binary_op,
+                         UnaryOpType unary_op) {
+  Impl::static_assert_is_not_openmptarget(ex);
+
+  return Impl::transform_inclusive_scan_impl(label, ex, first, last, first_dest,
+                                             binary_op, unary_op);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryOpType,
+          class UnaryOpType>
+auto transform_inclusive_scan(
+    const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+    BinaryOpType binary_op, UnaryOpType unary_op) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::transform_inclusive_scan_impl(
+      "Kokkos::transform_inclusive_scan_custom_functors_view_api", ex,
+      KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+      binary_op, unary_op);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryOpType,
+          class UnaryOpType>
+auto transform_inclusive_scan(
+    const std::string& label, const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+    BinaryOpType binary_op, UnaryOpType unary_op) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::transform_inclusive_scan_impl(
+      label, ex, KE::cbegin(view_from), KE::cend(view_from),
+      KE::begin(view_dest), binary_op, unary_op);
+}
+
+// overload set 2 (init value)
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOpType, class UnaryOpType,
+          class ValueType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+transform_inclusive_scan(const ExecutionSpace& ex, InputIteratorType first,
+                         InputIteratorType last, OutputIteratorType first_dest,
+                         BinaryOpType binary_op, UnaryOpType unary_op,
+                         ValueType init_value) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  return Impl::transform_inclusive_scan_impl(
+      "Kokkos::transform_inclusive_scan_custom_functors_iterator_api", ex,
+      first, last, first_dest, binary_op, unary_op, init_value);
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOpType, class UnaryOpType,
+          class ValueType>
+std::enable_if_t<::Kokkos::Experimental::Impl::are_iterators<
+                     InputIteratorType, OutputIteratorType>::value,
+                 OutputIteratorType>
+transform_inclusive_scan(const std::string& label, const ExecutionSpace& ex,
+                         InputIteratorType first, InputIteratorType last,
+                         OutputIteratorType first_dest, BinaryOpType binary_op,
+                         UnaryOpType unary_op, ValueType init_value) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  return Impl::transform_inclusive_scan_impl(label, ex, first, last, first_dest,
+                                             binary_op, unary_op, init_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryOpType,
+          class UnaryOpType, class ValueType>
+auto transform_inclusive_scan(
+    const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+    BinaryOpType binary_op, UnaryOpType unary_op, ValueType init_value) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::transform_inclusive_scan_impl(
+      "Kokkos::transform_inclusive_scan_custom_functors_view_api", ex,
+      KE::cbegin(view_from), KE::cend(view_from), KE::begin(view_dest),
+      binary_op, unary_op, init_value);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryOpType,
+          class UnaryOpType, class ValueType>
+auto transform_inclusive_scan(
+    const std::string& label, const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& view_from,
+    const ::Kokkos::View<DataType2, Properties2...>& view_dest,
+    BinaryOpType binary_op, UnaryOpType unary_op, ValueType init_value) {
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_from);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view_dest);
+  namespace KE = ::Kokkos::Experimental;
+  return Impl::transform_inclusive_scan_impl(
+      label, ex, KE::cbegin(view_from), KE::cend(view_from),
+      KE::begin(view_dest), binary_op, unary_op, init_value);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
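
A sketch of overload set 1 (no init value) of the view-based transform_inclusive_scan above, under the same illustrative assumptions as the exclusive variant:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    Kokkos::View<int*> in("in", 5);
    Kokkos::View<int*> out("out", 5);
    Kokkos::deep_copy(in, 1);
    // Inclusive: out(i) joins unary_op(in(j)) for all j <= i, giving
    // {2, 4, 6, 8, 10} here.
    Kokkos::Experimental::transform_inclusive_scan(
        Kokkos::DefaultExecutionSpace(), in, out,
        KOKKOS_LAMBDA(int a, int b) { return a + b; },
        KOKKOS_LAMBDA(int x) { return 2 * x; });
  }
  Kokkos::finalize();
  return 0;
}
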
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_TransformReduce.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_TransformReduce.hpp
new file mode 100644 (file)
index 0000000..5caced5
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_REDUCE_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_REDUCE_HPP
+
+#include "impl/Kokkos_TransformReduce.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// ----------------------------
+// overload set1:
+// no custom functors passed, so equivalent to
+// transform_reduce(first1, last1, first2, init, plus<>(), multiplies<>());
+// ----------------------------
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class ValueType>
+ValueType transform_reduce(const ExecutionSpace& ex, IteratorType1 first1,
+                           IteratorType1 last1, IteratorType2 first2,
+                           ValueType init_reduction_value) {
+  return Impl::transform_reduce_default_functors_impl(
+      "Kokkos::transform_reduce_default_functors_iterator_api", ex, first1,
+      last1, first2, std::move(init_reduction_value));
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class ValueType>
+ValueType transform_reduce(const std::string& label, const ExecutionSpace& ex,
+                           IteratorType1 first1, IteratorType1 last1,
+                           IteratorType2 first2,
+                           ValueType init_reduction_value) {
+  return Impl::transform_reduce_default_functors_impl(
+      label, ex, first1, last1, first2, std::move(init_reduction_value));
+}
+
+// overload set1 accepting views
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType>
+ValueType transform_reduce(
+    const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& first_view,
+    const ::Kokkos::View<DataType2, Properties2...>& second_view,
+    ValueType init_reduction_value) {
+  namespace KE = ::Kokkos::Experimental;
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
+
+  return Impl::transform_reduce_default_functors_impl(
+      "Kokkos::transform_reduce_default_functors_iterator_api", ex,
+      KE::cbegin(first_view), KE::cend(first_view), KE::cbegin(second_view),
+      std::move(init_reduction_value));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType>
+ValueType transform_reduce(
+    const std::string& label, const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& first_view,
+    const ::Kokkos::View<DataType2, Properties2...>& second_view,
+    ValueType init_reduction_value) {
+  namespace KE = ::Kokkos::Experimental;
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
+
+  return Impl::transform_reduce_default_functors_impl(
+      label, ex, KE::cbegin(first_view), KE::cend(first_view),
+      KE::cbegin(second_view), std::move(init_reduction_value));
+}
+
+//
+// overload set2:
+// accepts a custom transform and joiner functor
+//
+
+// Note: the C++ standard names this argument BinaryReductionOp, but in
+// the Kokkos naming convention it corresponds to a "joiner" that knows
+// how to join two values.
+// NOTE: the "joiner" and "transformer" must be commutative.
+
+// https://en.cppreference.com/w/cpp/algorithm/transform_reduce
+
+// api accepting iterators
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class ValueType, class BinaryJoinerType, class BinaryTransform>
+ValueType transform_reduce(const ExecutionSpace& ex, IteratorType1 first1,
+                           IteratorType1 last1, IteratorType2 first2,
+                           ValueType init_reduction_value,
+                           BinaryJoinerType joiner,
+                           BinaryTransform transformer) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  return Impl::transform_reduce_custom_functors_impl(
+      "Kokkos::transform_reduce_custom_functors_iterator_api", ex, first1,
+      last1, first2, std::move(init_reduction_value), std::move(joiner),
+      std::move(transformer));
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class ValueType, class BinaryJoinerType, class BinaryTransform>
+ValueType transform_reduce(const std::string& label, const ExecutionSpace& ex,
+                           IteratorType1 first1, IteratorType1 last1,
+                           IteratorType2 first2, ValueType init_reduction_value,
+                           BinaryJoinerType joiner,
+                           BinaryTransform transformer) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  return Impl::transform_reduce_custom_functors_impl(
+      label, ex, first1, last1, first2, std::move(init_reduction_value),
+      std::move(joiner), std::move(transformer));
+}
+
+// accepting views
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType,
+          class BinaryJoinerType, class BinaryTransform>
+ValueType transform_reduce(
+    const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& first_view,
+    const ::Kokkos::View<DataType2, Properties2...>& second_view,
+    ValueType init_reduction_value, BinaryJoinerType joiner,
+    BinaryTransform transformer) {
+  namespace KE = ::Kokkos::Experimental;
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
+
+  return Impl::transform_reduce_custom_functors_impl(
+      "Kokkos::transform_reduce_custom_functors_view_api", ex,
+      KE::cbegin(first_view), KE::cend(first_view), KE::cbegin(second_view),
+      std::move(init_reduction_value), std::move(joiner),
+      std::move(transformer));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class ValueType,
+          class BinaryJoinerType, class BinaryTransform>
+ValueType transform_reduce(
+    const std::string& label, const ExecutionSpace& ex,
+    const ::Kokkos::View<DataType1, Properties1...>& first_view,
+    const ::Kokkos::View<DataType2, Properties2...>& second_view,
+    ValueType init_reduction_value, BinaryJoinerType joiner,
+    BinaryTransform transformer) {
+  namespace KE = ::Kokkos::Experimental;
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(first_view);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(second_view);
+
+  return Impl::transform_reduce_custom_functors_impl(
+      label, ex, KE::cbegin(first_view), KE::cend(first_view),
+      KE::cbegin(second_view), std::move(init_reduction_value),
+      std::move(joiner), std::move(transformer));
+}
+
+//
+// overload set3: single range, unary transform, custom joiner
+//
+// accepting iterators
+template <class ExecutionSpace, class IteratorType, class ValueType,
+          class BinaryJoinerType, class UnaryTransform>
+// need this to avoid ambiguous call
+std::enable_if_t<
+    ::Kokkos::Experimental::Impl::are_iterators<IteratorType>::value, ValueType>
+transform_reduce(const ExecutionSpace& ex, IteratorType first1,
+                 IteratorType last1, ValueType init_reduction_value,
+                 BinaryJoinerType joiner, UnaryTransform transformer) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  return Impl::transform_reduce_custom_functors_impl(
+      "Kokkos::transform_reduce_custom_functors_iterator_api", ex, first1,
+      last1, std::move(init_reduction_value), std::move(joiner),
+      std::move(transformer));
+}
+
+template <class ExecutionSpace, class IteratorType, class ValueType,
+          class BinaryJoinerType, class UnaryTransform>
+// need this to avoid ambiguous call
+std::enable_if_t<
+    ::Kokkos::Experimental::Impl::are_iterators<IteratorType>::value, ValueType>
+transform_reduce(const std::string& label, const ExecutionSpace& ex,
+                 IteratorType first1, IteratorType last1,
+                 ValueType init_reduction_value, BinaryJoinerType joiner,
+                 UnaryTransform transformer) {
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  return Impl::transform_reduce_custom_functors_impl(
+      label, ex, first1, last1, std::move(init_reduction_value),
+      std::move(joiner), std::move(transformer));
+}
+
+// accepting views
+template <class ExecutionSpace, class DataType, class... Properties,
+          class ValueType, class BinaryJoinerType, class UnaryTransform>
+ValueType transform_reduce(const ExecutionSpace& ex,
+                           const ::Kokkos::View<DataType, Properties...>& view,
+                           ValueType init_reduction_value,
+                           BinaryJoinerType joiner,
+                           UnaryTransform transformer) {
+  namespace KE = ::Kokkos::Experimental;
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  return Impl::transform_reduce_custom_functors_impl(
+      "Kokkos::transform_reduce_custom_functors_view_api", ex, KE::cbegin(view),
+      KE::cend(view), std::move(init_reduction_value), std::move(joiner),
+      std::move(transformer));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class ValueType, class BinaryJoinerType, class UnaryTransform>
+ValueType transform_reduce(const std::string& label, const ExecutionSpace& ex,
+                           const ::Kokkos::View<DataType, Properties...>& view,
+                           ValueType init_reduction_value,
+                           BinaryJoinerType joiner,
+                           UnaryTransform transformer) {
+  namespace KE = ::Kokkos::Experimental;
+  static_assert(std::is_move_constructible<ValueType>::value,
+                "ValueType must be move constructible.");
+
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+
+  return Impl::transform_reduce_custom_functors_impl(
+      label, ex, KE::cbegin(view), KE::cend(view),
+      std::move(init_reduction_value), std::move(joiner),
+      std::move(transformer));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
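
A sketch of overload set 1 above (default functors), which reduces to a dot product; views, sizes, and values are illustrative assumptions:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>
#include <iostream>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    Kokkos::View<double*> x("x", 3);
    Kokkos::View<double*> y("y", 3);
    Kokkos::deep_copy(x, 2.0);
    Kokkos::deep_copy(y, 3.0);
    // Default functors: 0.0 + sum_i x(i) * y(i) = 18.0 here.
    const double dot = Kokkos::Experimental::transform_reduce(
        Kokkos::DefaultExecutionSpace(), x, y, 0.0);
    std::cout << "dot = " << dot << '\n';
  }
  Kokkos::finalize();
  return 0;
}
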
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Unique.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_Unique.hpp
new file mode 100644 (file)
index 0000000..aeb54a6
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_UNIQUE_HPP
+#define KOKKOS_STD_ALGORITHMS_UNIQUE_HPP
+
+#include "impl/Kokkos_Unique.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// note: the enable_if below avoids "call to ... is ambiguous" errors,
+// e.g., in the unit tests where a variadic function is used
+
+// overload set1
+template <class ExecutionSpace, class IteratorType>
+std::enable_if_t<!::Kokkos::is_view<IteratorType>::value, IteratorType> unique(
+    const ExecutionSpace& ex, IteratorType first, IteratorType last) {
+  return Impl::unique_impl("Kokkos::unique_iterator_api_default", ex, first,
+                           last);
+}
+
+template <class ExecutionSpace, class IteratorType>
+std::enable_if_t<!::Kokkos::is_view<IteratorType>::value, IteratorType> unique(
+    const std::string& label, const ExecutionSpace& ex, IteratorType first,
+    IteratorType last) {
+  return Impl::unique_impl(label, ex, first, last);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto unique(const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return ::Kokkos::Experimental::unique("Kokkos::unique_view_api_default", ex,
+                                        begin(view), end(view));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties>
+auto unique(const std::string& label, const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& view) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return ::Kokkos::Experimental::unique(label, ex, begin(view), end(view));
+}
+
+// overload set2
+template <class ExecutionSpace, class IteratorType, class BinaryPredicate>
+IteratorType unique(const ExecutionSpace& ex, IteratorType first,
+                    IteratorType last, BinaryPredicate pred) {
+  return Impl::unique_impl("Kokkos::unique_iterator_api_default", ex, first,
+                           last, pred);
+}
+
+template <class ExecutionSpace, class IteratorType, class BinaryPredicate>
+IteratorType unique(const std::string& label, const ExecutionSpace& ex,
+                    IteratorType first, IteratorType last,
+                    BinaryPredicate pred) {
+  return Impl::unique_impl(label, ex, first, last, pred);
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class BinaryPredicate>
+auto unique(const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& view,
+            BinaryPredicate pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return Impl::unique_impl("Kokkos::unique_view_api_default", ex, begin(view),
+                           end(view), std::move(pred));
+}
+
+template <class ExecutionSpace, class DataType, class... Properties,
+          class BinaryPredicate>
+auto unique(const std::string& label, const ExecutionSpace& ex,
+            const ::Kokkos::View<DataType, Properties...>& view,
+            BinaryPredicate pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(view);
+  return Impl::unique_impl(label, ex, begin(view), end(view), std::move(pred));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
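
A sketch of the view-based unique overload above; the duplicate pattern is an illustrative assumption:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::View<int*> v("v", 6);
    auto h = Kokkos::create_mirror_view(v);
    const int vals[] = {1, 1, 2, 3, 3, 3};
    for (int i = 0; i < 6; ++i) h(i) = vals[i];
    Kokkos::deep_copy(v, h);
    // Collapses consecutive duplicates to {1, 2, 3, ...}; as with
    // std::unique, elements past the returned iterator are unspecified.
    auto new_last = KE::unique(Kokkos::DefaultExecutionSpace(), v);
    const auto num_unique = new_last - KE::begin(v);  // 3
    (void)num_unique;
  }
  Kokkos::finalize();
  return 0;
}
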
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_UniqueCopy.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/Kokkos_UniqueCopy.hpp
new file mode 100644 (file)
index 0000000..632b560
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_UNIQUE_COPY_HPP
+#define KOKKOS_STD_ALGORITHMS_UNIQUE_COPY_HPP
+
+#include "impl/Kokkos_UniqueCopy.hpp"
+#include "Kokkos_BeginEnd.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+
+// overload set1
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+std::enable_if_t<!::Kokkos::is_view<InputIterator>::value, OutputIterator>
+unique_copy(const ExecutionSpace& ex, InputIterator first, InputIterator last,
+            OutputIterator d_first) {
+  return Impl::unique_copy_impl("Kokkos::unique_copy_iterator_api_default", ex,
+                                first, last, d_first);
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+std::enable_if_t<!::Kokkos::is_view<InputIterator>::value, OutputIterator>
+unique_copy(const std::string& label, const ExecutionSpace& ex,
+            InputIterator first, InputIterator last, OutputIterator d_first) {
+  return Impl::unique_copy_impl(label, ex, first, last, d_first);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto unique_copy(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType1, Properties1...>& source,
+                 const ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return ::Kokkos::Experimental::unique_copy(
+      "Kokkos::unique_copy_view_api_default", ex, cbegin(source), cend(source),
+      begin(dest));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2>
+auto unique_copy(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType1, Properties1...>& source,
+                 const ::Kokkos::View<DataType2, Properties2...>& dest) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return ::Kokkos::Experimental::unique_copy(label, ex, cbegin(source),
+                                             cend(source), begin(dest));
+}
+
+// overload set2
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class BinaryPredicate>
+OutputIterator unique_copy(const ExecutionSpace& ex, InputIterator first,
+                           InputIterator last, OutputIterator d_first,
+                           BinaryPredicate pred) {
+  return Impl::unique_copy_impl("Kokkos::unique_copy_iterator_api_default", ex,
+                                first, last, d_first, pred);
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class BinaryPredicate>
+OutputIterator unique_copy(const std::string& label, const ExecutionSpace& ex,
+                           InputIterator first, InputIterator last,
+                           OutputIterator d_first, BinaryPredicate pred) {
+  return Impl::unique_copy_impl(label, ex, first, last, d_first, pred);
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryPredicate>
+auto unique_copy(const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType1, Properties1...>& source,
+                 const ::Kokkos::View<DataType2, Properties2...>& dest,
+                 BinaryPredicate pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::unique_copy_impl("Kokkos::unique_copy_view_api_default", ex,
+                                cbegin(source), cend(source), begin(dest),
+                                std::move(pred));
+}
+
+template <class ExecutionSpace, class DataType1, class... Properties1,
+          class DataType2, class... Properties2, class BinaryPredicate>
+auto unique_copy(const std::string& label, const ExecutionSpace& ex,
+                 const ::Kokkos::View<DataType1, Properties1...>& source,
+                 const ::Kokkos::View<DataType2, Properties2...>& dest,
+                 BinaryPredicate pred) {
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(source);
+  Impl::static_assert_is_admissible_to_kokkos_std_algorithms(dest);
+
+  return Impl::unique_copy_impl(label, ex, cbegin(source), cend(source),
+                                begin(dest), std::move(pred));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
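
A sketch of the view-based unique_copy overload above, under the same illustrative assumptions as the unique example; unlike unique, the source is left untouched:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::View<int*> src("src", 6);
    Kokkos::View<int*> dst("dst", 6);
    auto h = Kokkos::create_mirror_view(src);
    const int vals[] = {1, 1, 2, 3, 3, 3};
    for (int i = 0; i < 6; ++i) h(i) = vals[i];
    Kokkos::deep_copy(src, h);
    // Copies src to dst skipping consecutive duplicates, so dst begins
    // with {1, 2, 3}; returns an iterator past the last written element.
    auto it = KE::unique_copy(Kokkos::DefaultExecutionSpace(), src, dst);
    (void)it;
  }
  Kokkos::finalize();
  return 0;
}
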
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_AdjacentDifference.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_AdjacentDifference.hpp
new file mode 100644 (file)
index 0000000..35c78b8
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_ADJACENT_DIFFERENCE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_ADJACENT_DIFFERENCE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ValueType1, class ValueType2, class RetType = ValueType2>
+struct StdAdjacentDifferenceDefaultBinaryOpFunctor {
+  KOKKOS_FUNCTION
+  constexpr RetType operator()(const ValueType1& a, const ValueType2& b) const {
+    return a - b;
+  }
+};
+
+template <class InputIteratorType, class OutputIteratorType,
+          class BinaryOperator>
+struct StdAdjacentDiffFunctor {
+  using index_type = typename InputIteratorType::difference_type;
+
+  const InputIteratorType m_first_from;
+  const OutputIteratorType m_first_dest;
+  BinaryOperator m_op;
+
+  KOKKOS_FUNCTION
+  void operator()(const index_type i) const {
+    const auto& my_value = m_first_from[i];
+    if (i == 0) {
+      m_first_dest[i] = my_value;
+    } else {
+      const auto& left_value = m_first_from[i - 1];
+      m_first_dest[i]        = m_op(my_value, left_value);
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdAdjacentDiffFunctor(InputIteratorType first_from,
+                         OutputIteratorType first_dest, BinaryOperator op)
+      : m_first_from(std::move(first_from)),
+        m_first_dest(std::move(first_dest)),
+        m_op(std::move(op)) {}
+};
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOp>
+OutputIteratorType adjacent_difference_impl(const std::string& label,
+                                            const ExecutionSpace& ex,
+                                            InputIteratorType first_from,
+                                            InputIteratorType last_from,
+                                            OutputIteratorType first_dest,
+                                            BinaryOp bin_op) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+  Impl::static_assert_iterators_have_matching_difference_type(first_from,
+                                                              first_dest);
+  Impl::expect_valid_range(first_from, last_from);
+
+  if (first_from == last_from) {
+    return first_dest;
+  }
+
+  // aliases
+  using value_type    = typename OutputIteratorType::value_type;
+  using aux_view_type = ::Kokkos::View<value_type*, ExecutionSpace>;
+  using functor_t =
+      StdAdjacentDiffFunctor<InputIteratorType, OutputIteratorType, BinaryOp>;
+
+  // run
+  const auto num_elements =
+      Kokkos::Experimental::distance(first_from, last_from);
+  aux_view_type aux_view("aux_view", num_elements);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         functor_t(first_from, first_dest, bin_op));
+  ex.fence("Kokkos::adjacent_difference: fence after operation");
+
+  // return
+  return first_dest + num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
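
A usage sketch of the public adjacent_difference that forwards to this impl (illustrative, not part of the commit). Note that the aux_view allocated above is never handed to the functor in this bundled version, and that source and destination should not alias, since each parallel iteration also reads the left neighbor of the input:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> a("a", 5);
    Kokkos::View<int*> b("b", 5);
    Kokkos::parallel_for(5, KOKKOS_LAMBDA(int i) { a(i) = i * i; });
    // b(0) = a(0); b(i) = a(i) - a(i-1) for i > 0, matching the functor above
    KE::adjacent_difference(ex, a, b);  // b = {0, 1, 3, 5, 7}
  }
  Kokkos::finalize();
  return 0;
}
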
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_AdjacentFind.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_AdjacentFind.hpp
new file mode 100644
index 0000000..155f6c7
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_ADJACENT_FIND_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_ADJACENT_FIND_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class IteratorType, class ReducerType,
+          class PredicateType>
+struct StdAdjacentFindFunctor {
+  using red_value_type = typename ReducerType::value_type;
+
+  IteratorType m_first;
+  ReducerType m_reducer;
+  PredicateType m_p;
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, red_value_type& red_value) const {
+    const auto& my_value   = m_first[i];
+    const auto& next_value = m_first[i + 1];
+    const bool are_equal   = m_p(my_value, next_value);
+
+    auto rv =
+        are_equal
+            ? red_value_type{i}
+            : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
+
+    m_reducer.join(red_value, rv);
+  }
+
+  KOKKOS_FUNCTION
+  StdAdjacentFindFunctor(IteratorType first, ReducerType reducer,
+                         PredicateType p)
+      : m_first(std::move(first)),
+        m_reducer(std::move(reducer)),
+        m_p(std::move(p)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class PredicateType>
+IteratorType adjacent_find_impl(const std::string& label,
+                                const ExecutionSpace& ex, IteratorType first,
+                                IteratorType last, PredicateType pred) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+
+  if (num_elements <= 1) {
+    return last;
+  }
+
+  using index_type           = typename IteratorType::difference_type;
+  using reducer_type         = FirstLoc<index_type>;
+  using reduction_value_type = typename reducer_type::value_type;
+  using func_t = StdAdjacentFindFunctor<index_type, IteratorType, reducer_type,
+                                        PredicateType>;
+
+  reduction_value_type red_result;
+  reducer_type reducer(red_result);
+
+  // note that below we use num_elements-1 because
+  // each index i in the reduction checks both i and (i+1).
+  ::Kokkos::parallel_reduce(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements - 1),
+      func_t(first, reducer, pred), reducer);
+
+  // fence not needed because reducing into scalar
+  if (red_result.min_loc_true ==
+      ::Kokkos::reduction_identity<index_type>::min()) {
+    return last;
+  } else {
+    return first + red_result.min_loc_true;
+  }
+}
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType adjacent_find_impl(const std::string& label,
+                                const ExecutionSpace& ex, IteratorType first,
+                                IteratorType last) {
+  using value_type     = typename IteratorType::value_type;
+  using default_pred_t = StdAlgoEqualBinaryPredicate<value_type>;
+  return adjacent_find_impl(label, ex, first, last, default_pred_t());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
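
A usage sketch for adjacent_find (illustrative, not part of the commit); the data is made up and a default execution space is assumed:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> v("v", 6);
    // v = {0, 1, 2, 3, 3, 3}
    Kokkos::parallel_for(6, KOKKOS_LAMBDA(int i) { v(i) = i < 3 ? i : 3; });
    // Default predicate is equality; the FirstLoc reduction yields the
    // smallest i with v(i) == v(i+1), or end(v) if no such pair exists.
    auto it = KE::adjacent_find(ex, v);
    const auto pos = it - KE::begin(v);  // 3
    (void)pos;
  }
  Kokkos::finalize();
  return 0;
}
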
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_AllOfAnyOfNoneOf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_AllOfAnyOfNoneOf.hpp
new file mode 100644
index 0000000..dd8ae4f
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_ALL_OF_ANY_OF_NONE_OF_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_ALL_OF_ANY_OF_NONE_OF_IMPL_HPP
+
+#include "Kokkos_FindIfOrNot.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ExecutionSpace, class InputIterator, class Predicate>
+bool all_of_impl(const std::string& label, const ExecutionSpace& ex,
+                 InputIterator first, InputIterator last, Predicate predicate) {
+  return (find_if_or_not_impl<false>(label, ex, first, last, predicate) ==
+          last);
+}
+
+template <class ExecutionSpace, class InputIterator, class Predicate>
+bool any_of_impl(const std::string& label, const ExecutionSpace& ex,
+                 InputIterator first, InputIterator last, Predicate predicate) {
+  return (find_if_or_not_impl<true>(label, ex, first, last, predicate) != last);
+}
+
+template <class ExecutionSpace, class IteratorType, class Predicate>
+bool none_of_impl(const std::string& label, const ExecutionSpace& ex,
+                  IteratorType first, IteratorType last, Predicate predicate) {
+  return (find_if_or_not_impl<true>(label, ex, first, last, predicate) == last);
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
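
Since all three wrappers above reduce to find_if_or_not_impl, a single sketch covers them (illustrative, not part of the commit):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> v("v", 4);
    KE::fill(ex, v, 2);  // v = {2, 2, 2, 2}
    const bool all_even = KE::all_of(ex, v, KOKKOS_LAMBDA(int x) { return x % 2 == 0; });  // true
    const bool any_neg  = KE::any_of(ex, v, KOKKOS_LAMBDA(int x) { return x < 0; });       // false
    const bool none_neg = KE::none_of(ex, v, KOKKOS_LAMBDA(int x) { return x < 0; });      // true
    (void)all_even; (void)any_neg; (void)none_neg;
  }
  Kokkos::finalize();
  return 0;
}
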
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Constraints.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Constraints.hpp
new file mode 100644
index 0000000..ec54cd1
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_CONSTRAINTS_HPP_
+#define KOKKOS_STD_ALGORITHMS_CONSTRAINTS_HPP_
+
+#include <Kokkos_DetectionIdiom.hpp>
+#include <Kokkos_View.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <typename T, typename enable = void>
+struct is_admissible_to_kokkos_std_algorithms : std::false_type {};
+
+template <typename T>
+struct is_admissible_to_kokkos_std_algorithms<
+    T, std::enable_if_t< ::Kokkos::is_view<T>::value && T::rank == 1 &&
+                         (std::is_same<typename T::traits::array_layout,
+                                       Kokkos::LayoutLeft>::value ||
+                          std::is_same<typename T::traits::array_layout,
+                                       Kokkos::LayoutRight>::value ||
+                          std::is_same<typename T::traits::array_layout,
+                                       Kokkos::LayoutStride>::value)> >
+    : std::true_type {};
+
+template <class ViewType>
+KOKKOS_INLINE_FUNCTION constexpr void
+static_assert_is_admissible_to_kokkos_std_algorithms(
+    const ViewType& /* view */) {
+  static_assert(is_admissible_to_kokkos_std_algorithms<ViewType>::value,
+                "Currently, Kokkos standard algorithms only accept 1D Views.");
+}
+
+//
+// is_iterator
+//
+template <class T>
+using iterator_category_t = typename T::iterator_category;
+
+template <class T>
+using is_iterator = Kokkos::is_detected<iterator_category_t, T>;
+
+//
+// are_iterators
+//
+template <class... Args>
+struct are_iterators;
+
+template <class T>
+struct are_iterators<T> {
+  static constexpr bool value = is_iterator<T>::value;
+};
+
+template <class Head, class... Tail>
+struct are_iterators<Head, Tail...> {
+  static constexpr bool value =
+      are_iterators<Head>::value && are_iterators<Tail...>::value;
+};
+
+//
+// are_random_access_iterators
+//
+template <class... Args>
+struct are_random_access_iterators;
+
+template <class T>
+struct are_random_access_iterators<T> {
+  static constexpr bool value =
+      is_iterator<T>::value &&
+      std::is_base_of<std::random_access_iterator_tag,
+                      typename T::iterator_category>::value;
+};
+
+template <class Head, class... Tail>
+struct are_random_access_iterators<Head, Tail...> {
+  static constexpr bool value = are_random_access_iterators<Head>::value &&
+                                are_random_access_iterators<Tail...>::value;
+};
+
+//
+// iterators_are_accessible_from
+//
+template <class... Args>
+struct iterators_are_accessible_from;
+
+template <class ExeSpace, class IteratorType>
+struct iterators_are_accessible_from<ExeSpace, IteratorType> {
+  using view_type = typename IteratorType::view_type;
+  static constexpr bool value =
+      SpaceAccessibility<ExeSpace,
+                         typename view_type::memory_space>::accessible;
+};
+
+template <class ExeSpace, class Head, class... Tail>
+struct iterators_are_accessible_from<ExeSpace, Head, Tail...> {
+  static constexpr bool value =
+      iterators_are_accessible_from<ExeSpace, Head>::value &&
+      iterators_are_accessible_from<ExeSpace, Tail...>::value;
+};
+
+template <class ExecutionSpace, class... IteratorTypes>
+KOKKOS_INLINE_FUNCTION constexpr void
+static_assert_random_access_and_accessible(const ExecutionSpace& /* ex */,
+                                           IteratorTypes... /* iterators */) {
+  static_assert(
+      are_random_access_iterators<IteratorTypes...>::value,
+      "Currently, Kokkos standard algorithms require random access iterators.");
+  static_assert(
+      iterators_are_accessible_from<ExecutionSpace, IteratorTypes...>::value,
+      "Incompatible view/iterator and execution space");
+}
+
+//
+// have matching difference_type
+//
+template <class... Args>
+struct iterators_have_matching_difference_type;
+
+template <class T>
+struct iterators_have_matching_difference_type<T> {
+  static constexpr bool value = true;
+};
+
+template <class T1, class T2>
+struct iterators_have_matching_difference_type<T1, T2> {
+  static constexpr bool value =
+      std::is_same<typename T1::difference_type,
+                   typename T2::difference_type>::value;
+};
+
+template <class T1, class T2, class... Tail>
+struct iterators_have_matching_difference_type<T1, T2, Tail...> {
+  static constexpr bool value =
+      iterators_have_matching_difference_type<T1, T2>::value &&
+      iterators_have_matching_difference_type<T2, Tail...>::value;
+};
+
+template <class IteratorType1, class IteratorType2>
+KOKKOS_INLINE_FUNCTION constexpr void
+static_assert_iterators_have_matching_difference_type(IteratorType1 /* it1 */,
+                                                      IteratorType2 /* it2 */) {
+  static_assert(iterators_have_matching_difference_type<IteratorType1,
+                                                        IteratorType2>::value,
+                "Iterators do not have matching difference_type");
+}
+
+template <class IteratorType1, class IteratorType2, class IteratorType3>
+KOKKOS_INLINE_FUNCTION constexpr void
+static_assert_iterators_have_matching_difference_type(IteratorType1 it1,
+                                                      IteratorType2 it2,
+                                                      IteratorType3 it3) {
+  static_assert_iterators_have_matching_difference_type(it1, it2);
+  static_assert_iterators_have_matching_difference_type(it2, it3);
+}
+
+//
+// not_openmptarget
+//
+template <class ExeSpace>
+struct not_openmptarget {
+#ifndef KOKKOS_ENABLE_OPENMPTARGET
+  static constexpr bool value = true;
+#else
+  static constexpr bool value =
+      !std::is_same<std::decay_t<ExeSpace>,
+                    ::Kokkos::Experimental::OpenMPTarget>::value;
+#endif
+};
+
+template <class ExecutionSpace>
+KOKKOS_INLINE_FUNCTION constexpr void static_assert_is_not_openmptarget(
+    const ExecutionSpace&) {
+  static_assert(not_openmptarget<ExecutionSpace>::value,
+                "Currently, Kokkos standard algorithms do not support custom "
+                "comparators in OpenMPTarget");
+}
+
+//
+// valid range
+//
+template <class IteratorType>
+void expect_valid_range(IteratorType first, IteratorType last) {
+  // this is a no-op for release
+  KOKKOS_EXPECTS(last >= first);
+  // avoid compiler complaining when KOKKOS_EXPECTS is no-op
+  (void)first;
+  (void)last;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
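
These constraints are what a user hits at compile time when handing an unsupported View to any of the algorithms; a short sketch of what is and is not admissible (illustrative, not part of the commit):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::View<double*> ok("ok", 10);  // rank-1: admissible
    auto it = KE::begin(ok);             // fine: random-access iterator
    (void)it;
    // Kokkos::View<double**> bad("bad", 5, 5);
    // KE::begin(bad);  // would trip the static_assert: only 1D Views accepted
  }
  Kokkos::finalize();
  return 0;
}
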
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_CopyBackward.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_CopyBackward.hpp
new file mode 100644
index 0000000..18f6140
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_BACKWARD_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_BACKWARD_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class IteratorType1, class IteratorType2>
+struct StdCopyBackwardFunctor {
+  static_assert(std::is_signed<IndexType>::value,
+                "Kokkos: StdCopyBackwardFunctor requires signed index type");
+
+  IteratorType1 m_last;
+  IteratorType2 m_dest_last;
+
+  KOKKOS_FUNCTION
+  void operator()(IndexType i) const { m_dest_last[-i - 1] = m_last[-i - 1]; }
+
+  KOKKOS_FUNCTION
+  StdCopyBackwardFunctor(IteratorType1 _last, IteratorType2 _dest_last)
+      : m_last(std::move(_last)), m_dest_last(std::move(_dest_last)) {}
+};
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType2 copy_backward_impl(const std::string& label,
+                                 const ExecutionSpace& ex, IteratorType1 first,
+                                 IteratorType1 last, IteratorType2 d_last) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first, d_last);
+  Impl::static_assert_iterators_have_matching_difference_type(first, d_last);
+  Impl::expect_valid_range(first, last);
+
+  // aliases
+  using index_type = typename IteratorType1::difference_type;
+  using func_t =
+      StdCopyBackwardFunctor<index_type, IteratorType1, IteratorType2>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         func_t(last, d_last));
+  ex.fence("Kokkos::copy_backward: fence after operation");
+
+  // return
+  return d_last - num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
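
A usage sketch for copy_backward (illustrative, not part of the commit); the functor writes from the back, so only the tail of a larger destination is touched:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> src("src", 5);
    Kokkos::View<int*> dst("dst", 8);
    KE::fill(ex, src, 7);
    // Copies src into the last 5 slots of dst, i.e. dst(3)..dst(7);
    // the returned iterator points at dst(3) (d_last - num_elements).
    auto it = KE::copy_backward(ex, src, dst);
    (void)it;
  }
  Kokkos::finalize();
  return 0;
}
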
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_CopyCopyN.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_CopyCopyN.hpp
new file mode 100644
index 0000000..03b6fc6
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class InputIterator, class OutputIterator>
+struct StdCopyFunctor {
+  InputIterator m_first;
+  OutputIterator m_dest_first;
+
+  KOKKOS_FUNCTION
+  void operator()(IndexType i) const { m_dest_first[i] = m_first[i]; }
+
+  KOKKOS_FUNCTION
+  StdCopyFunctor(InputIterator _first, OutputIterator _dest_first)
+      : m_first(std::move(_first)), m_dest_first(std::move(_dest_first)) {}
+};
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator copy_impl(const std::string& label, const ExecutionSpace& ex,
+                         InputIterator first, InputIterator last,
+                         OutputIterator d_first) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first, d_first);
+  Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+  Impl::expect_valid_range(first, last);
+
+  // aliases
+  using index_type = typename InputIterator::difference_type;
+  using func_t     = StdCopyFunctor<index_type, InputIterator, OutputIterator>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         func_t(first, d_first));
+  ex.fence("Kokkos::copy: fence after operation");
+
+  // return
+  return d_first + num_elements;
+}
+
+template <class ExecutionSpace, class InputIterator, class Size,
+          class OutputIterator>
+OutputIterator copy_n_impl(const std::string& label, const ExecutionSpace& ex,
+                           InputIterator first_from, Size count,
+                           OutputIterator first_dest) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+  Impl::static_assert_iterators_have_matching_difference_type(first_from,
+                                                              first_dest);
+
+  if (count > 0) {
+    return copy_impl(label, ex, first_from, first_from + count, first_dest);
+  } else {
+    return first_dest;
+  }
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
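
A usage sketch covering both copy and copy_n (illustrative, not part of the commit):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> src("src", 6);
    Kokkos::View<int*> dst("dst", 6);
    KE::fill(ex, src, 1);
    KE::copy(ex, src, dst);                 // element-wise parallel copy
    auto it = KE::copy_n(ex, src, 3, dst);  // forwards to copy_impl when n > 0
    const auto n = it - KE::begin(dst);     // 3
    (void)n;
  }
  Kokkos::finalize();
  return 0;
}
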
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_CopyIf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_CopyIf.hpp
new file mode 100644
index 0000000..aebb5a9
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_COPY_IF_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_COPY_IF_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class FirstFrom, class FirstDest, class PredType>
+struct StdCopyIfFunctor {
+  FirstFrom m_first_from;
+  FirstDest m_first_dest;
+  PredType m_pred;
+
+  KOKKOS_FUNCTION
+  StdCopyIfFunctor(FirstFrom first_from, FirstDest first_dest, PredType pred)
+      : m_first_from(std::move(first_from)),
+        m_first_dest(std::move(first_dest)),
+        m_pred(std::move(pred)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, IndexType& update,
+                  const bool final_pass) const {
+    const auto& myval = m_first_from[i];
+    if (final_pass) {
+      if (m_pred(myval)) {
+        m_first_dest[update] = myval;
+      }
+    }
+
+    if (m_pred(myval)) {
+      update += 1;
+    }
+  }
+};
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class PredicateType>
+OutputIterator copy_if_impl(const std::string& label, const ExecutionSpace& ex,
+                            InputIterator first, InputIterator last,
+                            OutputIterator d_first, PredicateType pred) {
+  /*
+    To explain the impl, suppose that our data is:
+
+    | 1 | 1 | 2 | 2 | 3 | -2 | 4 | 4 | 4 | 5 | 7 | -10 |
+
+    and we want to copy only the even entries.
+    We can use an exclusive scan where the "update"
+    is incremented only for the elements that satisfy the predicate.
+    This way, the update allows us to track where in the destination
+    we need to copy the elements:
+
+    In this case, counting only the even entries, the exclusive scan
+    during the final pass would yield:
+
+    | 0 | 0 | 0 | 1 | 2 | 2 | 3 | 4 | 5 | 6 | 6 | 6 |
+              *   *       *   *   *   *           *
+
+    which provides the indexing in the destination where
+    each starred (*) element needs to be copied to since
+    the starred elements are those that satisfy the predicate.
+   */
+
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first, d_first);
+  Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+  Impl::expect_valid_range(first, last);
+
+  if (first == last) {
+    return d_first;
+  } else {
+    // aliases
+    using index_type = typename InputIterator::difference_type;
+    using func_type  = StdCopyIfFunctor<index_type, InputIterator,
+                                       OutputIterator, PredicateType>;
+
+    // run
+    const auto num_elements = Kokkos::Experimental::distance(first, last);
+    index_type count        = 0;
+    ::Kokkos::parallel_scan(label,
+                            RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                            func_type(first, d_first, pred), count);
+
+    // fence not needed because of the scan accumulating into count
+    return d_first + count;
+  }
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
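
A usage sketch mirroring the even-entries example from the comment above (illustrative, not part of the commit):

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> src("src", 6);
    Kokkos::View<int*> dst("dst", 6);
    Kokkos::parallel_for(6, KOKKOS_LAMBDA(int i) { src(i) = i; });
    // Keep only even values; the scan-based impl packs them contiguously
    // into dst in their original order (the copy is stable).
    auto it = KE::copy_if(ex, src, dst,
                          KOKKOS_LAMBDA(int x) { return x % 2 == 0; });
    const auto n = it - KE::begin(dst);  // 3: values {0, 2, 4}
    (void)n;
  }
  Kokkos::finalize();
  return 0;
}
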
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_CountCountIf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_CountCountIf.hpp
new file mode 100644
index 0000000..982ac40
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_COUNT_IF_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_COUNT_IF_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class Predicate>
+struct StdCountIfFunctor {
+  using index_type = typename IteratorType::difference_type;
+  IteratorType m_first;
+  Predicate m_predicate;
+
+  KOKKOS_FUNCTION
+  void operator()(index_type i, index_type& lsum) const {
+    if (m_predicate(m_first[i])) {
+      lsum++;
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdCountIfFunctor(IteratorType _first, Predicate _predicate)
+      : m_first(std::move(_first)), m_predicate(std::move(_predicate)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class Predicate>
+typename IteratorType::difference_type count_if_impl(const std::string& label,
+                                                     const ExecutionSpace& ex,
+                                                     IteratorType first,
+                                                     IteratorType last,
+                                                     Predicate predicate) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  // aliases
+  using func_t = StdCountIfFunctor<IteratorType, Predicate>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  typename IteratorType::difference_type count = 0;
+  ::Kokkos::parallel_reduce(label,
+                            RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                            func_t(first, predicate), count);
+  ex.fence("Kokkos::count_if: fence after operation");
+
+  return count;
+}
+
+template <class ExecutionSpace, class IteratorType, class T>
+auto count_impl(const std::string& label, const ExecutionSpace& ex,
+                IteratorType first, IteratorType last, const T& value) {
+  return count_if_impl(
+      label, ex, first, last,
+      ::Kokkos::Experimental::Impl::StdAlgoEqualsValUnaryPredicate<T>(value));
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
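
A usage sketch for count and count_if (illustrative, not part of the commit); as the wrapper above shows, count is count_if with an equals-value predicate:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> v("v", 8);
    // v = {0, 1, 2, 0, 1, 2, 0, 1}
    Kokkos::parallel_for(8, KOKKOS_LAMBDA(int i) { v(i) = i % 3; });
    const auto twos = KE::count(ex, v, 2);  // 2 (at i = 2 and i = 5)
    const auto pos  = KE::count_if(ex, v, KOKKOS_LAMBDA(int x) { return x > 0; });  // 5
    (void)twos; (void)pos;
  }
  Kokkos::finalize();
  return 0;
}
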
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Equal.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Equal.hpp
new file mode 100644
index 0000000..9482917
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_EQUAL_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_EQUAL_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+struct StdEqualFunctor {
+  IteratorType1 m_first1;
+  IteratorType2 m_first2;
+  BinaryPredicateType m_predicate;
+
+  KOKKOS_FUNCTION
+  void operator()(IndexType i, std::size_t& lsum) const {
+    if (!m_predicate(m_first1[i], m_first2[i])) {
+      lsum = 1;
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdEqualFunctor(IteratorType1 _first1, IteratorType2 _first2,
+                  BinaryPredicateType _predicate)
+      : m_first1(std::move(_first1)),
+        m_first2(std::move(_first2)),
+        m_predicate(std::move(_predicate)) {}
+};
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+bool equal_impl(const std::string& label, const ExecutionSpace& ex,
+                IteratorType1 first1, IteratorType1 last1, IteratorType2 first2,
+                BinaryPredicateType predicate) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first1, first2);
+  Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+  Impl::expect_valid_range(first1, last1);
+
+  // aliases
+  using index_type = typename IteratorType1::difference_type;
+  using func_t     = StdEqualFunctor<index_type, IteratorType1, IteratorType2,
+                                 BinaryPredicateType>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first1, last1);
+  std::size_t different   = 0;
+  ::Kokkos::parallel_reduce(label,
+                            RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                            func_t(first1, first2, predicate), different);
+  ex.fence("Kokkos::equal: fence after operation");
+
+  return !different;
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+bool equal_impl(const std::string& label, const ExecutionSpace& ex,
+                IteratorType1 first1, IteratorType1 last1,
+                IteratorType2 first2) {
+  using value_type1 = typename IteratorType1::value_type;
+  using value_type2 = typename IteratorType2::value_type;
+  using pred_t      = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+  return equal_impl(label, ex, first1, last1, first2, pred_t());
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+bool equal_impl(const std::string& label, const ExecutionSpace& ex,
+                IteratorType1 first1, IteratorType1 last1, IteratorType2 first2,
+                IteratorType2 last2, BinaryPredicateType predicate) {
+  const auto d1 = ::Kokkos::Experimental::distance(first1, last1);
+  const auto d2 = ::Kokkos::Experimental::distance(first2, last2);
+  if (d1 != d2) {
+    return false;
+  }
+
+  return equal_impl(label, ex, first1, last1, first2, predicate);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+bool equal_impl(const std::string& label, const ExecutionSpace& ex,
+                IteratorType1 first1, IteratorType1 last1, IteratorType2 first2,
+                IteratorType2 last2) {
+  Impl::expect_valid_range(first1, last1);
+  Impl::expect_valid_range(first2, last2);
+
+  using value_type1 = typename IteratorType1::value_type;
+  using value_type2 = typename IteratorType2::value_type;
+  using pred_t      = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+  return equal_impl(label, ex, first1, last1, first2, last2, pred_t());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
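
A usage sketch for equal (illustrative, not part of the commit); the impl reduces the number of mismatching positions and compares it to zero, and the two-range overload first rejects ranges of different lengths:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> a("a", 4);
    Kokkos::View<int*> b("b", 4);
    KE::fill(ex, a, 3);
    KE::fill(ex, b, 3);
    const bool same = KE::equal(ex, a, b);  // true
    (void)same;
  }
  Kokkos::finalize();
  return 0;
}
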
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ExclusiveScan.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ExclusiveScan.hpp
new file mode 100644
index 0000000..0ae4651
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_EXCLUSIVE_SCAN_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_EXCLUSIVE_SCAN_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include "Kokkos_ValueWrapperForNoNeutralElement.hpp"
+#include "Kokkos_IdentityReferenceUnaryFunctor.hpp"
+#include <std_algorithms/Kokkos_TransformExclusiveScan.hpp>
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+          class FirstDest>
+struct ExclusiveScanDefaultFunctorForKnownNeutralElement {
+  using execution_space = ExeSpace;
+
+  ValueType m_init_value;
+  FirstFrom m_first_from;
+  FirstDest m_first_dest;
+
+  KOKKOS_FUNCTION
+  ExclusiveScanDefaultFunctorForKnownNeutralElement(ValueType init,
+                                                    FirstFrom first_from,
+                                                    FirstDest first_dest)
+      : m_init_value(std::move(init)),
+        m_first_from(std::move(first_from)),
+        m_first_dest(std::move(first_dest)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, ValueType& update,
+                  const bool final_pass) const {
+    if (final_pass) m_first_dest[i] = update + m_init_value;
+    update += m_first_from[i];
+  }
+};
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+          class FirstDest>
+struct ExclusiveScanDefaultFunctor {
+  using execution_space = ExeSpace;
+  using value_type =
+      ::Kokkos::Experimental::Impl::ValueWrapperForNoNeutralElement<ValueType>;
+
+  ValueType m_init_value;
+  FirstFrom m_first_from;
+  FirstDest m_first_dest;
+
+  KOKKOS_FUNCTION
+  ExclusiveScanDefaultFunctor(ValueType init, FirstFrom first_from,
+                              FirstDest first_dest)
+      : m_init_value(std::move(init)),
+        m_first_from(std::move(first_from)),
+        m_first_dest(std::move(first_dest)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, value_type& update,
+                  const bool final_pass) const {
+    if (final_pass) {
+      if (i == 0) {
+        m_first_dest[i] = m_init_value;
+      } else {
+        m_first_dest[i] = update.val + m_init_value;
+      }
+    }
+
+    const auto tmp = value_type{m_first_from[i], false};
+    this->join(update, tmp);
+  }
+
+  KOKKOS_FUNCTION
+  void init(value_type& update) const {
+    update.val        = {};
+    update.is_initial = true;
+  }
+
+  KOKKOS_FUNCTION
+  void join(value_type& update, const value_type& input) const {
+    if (update.is_initial) {
+      update.val        = input.val;
+      update.is_initial = false;
+    } else {
+      update.val = update.val + input.val;
+    }
+  }
+};
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class ValueType, class BinaryOpType>
+OutputIteratorType exclusive_scan_custom_op_impl(
+    const std::string& label, const ExecutionSpace& ex,
+    InputIteratorType first_from, InputIteratorType last_from,
+    OutputIteratorType first_dest, ValueType init_value, BinaryOpType bop) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+  Impl::static_assert_iterators_have_matching_difference_type(first_from,
+                                                              first_dest);
+  Impl::expect_valid_range(first_from, last_from);
+
+  // aliases
+  using index_type    = typename InputIteratorType::difference_type;
+  using unary_op_type = StdNumericScanIdentityReferenceUnaryFunctor<ValueType>;
+  using func_type =
+      TransformExclusiveScanFunctor<ExecutionSpace, index_type, ValueType,
+                                    InputIteratorType, OutputIteratorType,
+                                    BinaryOpType, unary_op_type>;
+
+  // run
+  const auto num_elements =
+      Kokkos::Experimental::distance(first_from, last_from);
+  ::Kokkos::parallel_scan(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+      func_type(init_value, first_from, first_dest, bop, unary_op_type()));
+  ex.fence("Kokkos::exclusive_scan_custom_op: fence after operation");
+
+  // return
+  return first_dest + num_elements;
+}
+
+template <typename ValueType>
+using ex_scan_has_reduction_identity_sum_t =
+    decltype(Kokkos::reduction_identity<ValueType>::sum());
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class ValueType>
+OutputIteratorType exclusive_scan_default_op_impl(const std::string& label,
+                                                  const ExecutionSpace& ex,
+                                                  InputIteratorType first_from,
+                                                  InputIteratorType last_from,
+                                                  OutputIteratorType first_dest,
+                                                  ValueType init_value) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+  Impl::static_assert_iterators_have_matching_difference_type(first_from,
+                                                              first_dest);
+  Impl::expect_valid_range(first_from, last_from);
+
+  // does it make sense to do this static_assert too?
+  // using input_iterator_value_type = typename InputIteratorType::value_type;
+  // static_assert
+  //   (std::is_convertible<std::remove_cv_t<input_iterator_value_type>,
+  //   ValueType>::value,
+  //    "exclusive_scan: InputIteratorType::value_type not convertible to
+  //    ValueType");
+
+  // we are unnecessarily duplicating code, but this is on purpose
+  // so that we can use the default_op for OpenMPTarget.
+  // Originally, I had this implemented as:
+  // '''
+  // using bop_type   = StdExclusiveScanDefaultJoinFunctor<ValueType>;
+  // call exclusive_scan_custom_op_impl(..., bop_type());
+  // '''
+  // which avoids duplicating the functors, but for OpenMPTarget
+  // I cannot use a custom binary op.
+  // This is the same problem that occurs for reductions.
+
+  // aliases
+  using index_type = typename InputIteratorType::difference_type;
+  using func_type  = std::conditional_t<
+      ::Kokkos::is_detected<ex_scan_has_reduction_identity_sum_t,
+                            ValueType>::value,
+      ExclusiveScanDefaultFunctorForKnownNeutralElement<
+          ExecutionSpace, index_type, ValueType, InputIteratorType,
+          OutputIteratorType>,
+      ExclusiveScanDefaultFunctor<ExecutionSpace, index_type, ValueType,
+                                  InputIteratorType, OutputIteratorType>>;
+
+  // run
+  const auto num_elements =
+      Kokkos::Experimental::distance(first_from, last_from);
+  ::Kokkos::parallel_scan(label,
+                          RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                          func_type(init_value, first_from, first_dest));
+
+  ex.fence("Kokkos::exclusive_scan_default_op: fence after operation");
+
+  return first_dest + num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
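
A usage sketch for the default-op exclusive_scan (illustrative, not part of the commit). Since int provides reduction_identity<int>::sum(), the "known neutral element" functor above is selected at compile time:

#include <Kokkos_Core.hpp>
#include <Kokkos_StdAlgorithms.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    namespace KE = Kokkos::Experimental;
    Kokkos::DefaultExecutionSpace ex;
    Kokkos::View<int*> in("in", 4);
    Kokkos::View<int*> out("out", 4);
    KE::fill(ex, in, 1);
    // out(i) = init + sum of in(0..i-1)
    KE::exclusive_scan(ex, in, out, 0);  // out = {0, 1, 2, 3}
  }
  Kokkos::finalize();
  return 0;
}
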
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_FillFillN.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_FillFillN.hpp
new file mode 100644
index 0000000..843771b
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FILL_AND_FILL_N_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_FILL_AND_FILL_N_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator, class T>
+struct StdFillFunctor {
+  using index_type = typename InputIterator::difference_type;
+  InputIterator m_first;
+  T m_value;
+
+  KOKKOS_FUNCTION
+  void operator()(index_type i) const { m_first[i] = m_value; }
+
+  KOKKOS_FUNCTION
+  StdFillFunctor(InputIterator _first, T _value)
+      : m_first(std::move(_first)), m_value(std::move(_value)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class T>
+void fill_impl(const std::string& label, const ExecutionSpace& ex,
+               IteratorType first, IteratorType last, const T& value) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         StdFillFunctor<IteratorType, T>(first, value));
+  ex.fence("Kokkos::fill: fence after operation");
+}
+
+template <class ExecutionSpace, class IteratorType, class SizeType, class T>
+IteratorType fill_n_impl(const std::string& label, const ExecutionSpace& ex,
+                         IteratorType first, SizeType n, const T& value) {
+  auto last = first + n;
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  if (n <= 0) {
+    return first;
+  }
+
+  fill_impl(label, ex, first, last, value);
+  return last;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
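
A short sketch of the view-level entry points that reach fill_impl and fill_n_impl (names are illustrative; assumes an initialized Kokkos runtime):

    namespace KE = Kokkos::Experimental;
    Kokkos::View<double*> v("v", 100);
    // set every entry of the view
    KE::fill(Kokkos::DefaultExecutionSpace(), v, 3.14);
    // set only the first 10 entries; returns the iterator past the filled range
    KE::fill_n(Kokkos::DefaultExecutionSpace(), KE::begin(v), 10, 0.0);
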
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_FindEnd.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_FindEnd.hpp
new file mode 100644 (file)
index 0000000..35a6c4b
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_END_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_END_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class IteratorType1, class IteratorType2,
+          class ReducerType, class PredicateType>
+struct StdFindEndFunctor {
+  using red_value_type = typename ReducerType::value_type;
+
+  IteratorType1 m_first;
+  IteratorType1 m_last;
+  IteratorType2 m_s_first;
+  IteratorType2 m_s_last;
+  ReducerType m_reducer;
+  PredicateType m_p;
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, red_value_type& red_value) const {
+    namespace KE = ::Kokkos::Experimental;
+    auto myit    = m_first + i;
+    bool found   = true;
+
+    const auto search_count = KE::distance(m_s_first, m_s_last);
+    for (IndexType k = 0; k < search_count; ++k) {
+      // note that we add this EXPECT to check that we are in a valid range,
+      // but I think we can remove this because the guarantee that we don't go
+      // out of bounds is taken care of at the calling site
+      // where we launch the par-reduce.
+      KOKKOS_EXPECTS((myit + k) < m_last);
+
+      if (!m_p(myit[k], m_s_first[k])) {
+        found = false;
+        break;
+      }
+    }
+
+    const auto rv =
+        found ? red_value_type{i}
+              : red_value_type{::Kokkos::reduction_identity<IndexType>::max()};
+
+    m_reducer.join(red_value, rv);
+  }
+
+  KOKKOS_FUNCTION
+  StdFindEndFunctor(IteratorType1 first, IteratorType1 last,
+                    IteratorType2 s_first, IteratorType2 s_last,
+                    ReducerType reducer, PredicateType p)
+      : m_first(std::move(first)),
+        m_last(std::move(last)),
+        m_s_first(std::move(s_first)),
+        m_s_last(std::move(s_last)),
+        m_reducer(std::move(reducer)),
+        m_p(std::move(p)) {}
+};
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+IteratorType1 find_end_impl(const std::string& label, const ExecutionSpace& ex,
+                            IteratorType1 first, IteratorType1 last,
+                            IteratorType2 s_first, IteratorType2 s_last,
+                            const BinaryPredicateType& pred) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first, s_first);
+  Impl::static_assert_iterators_have_matching_difference_type(first, s_first);
+  Impl::expect_valid_range(first, last);
+  Impl::expect_valid_range(s_first, s_last);
+
+  // the target sequence should not be larger than the range [first, last)
+  namespace KE            = ::Kokkos::Experimental;
+  const auto num_elements = KE::distance(first, last);
+  const auto s_count      = KE::distance(s_first, s_last);
+  KOKKOS_EXPECTS(num_elements >= s_count);
+  (void)s_count;  // needed when macro above is a no-op
+
+  if (s_first == s_last) {
+    return last;
+  }
+
+  if (first == last) {
+    return last;
+  }
+
+  // special case where the two ranges have equal size
+  if (num_elements == s_count) {
+    const auto equal_result = equal_impl(label, ex, first, last, s_first, pred);
+    return (equal_result) ? first : last;
+  } else {
+    using index_type           = typename IteratorType1::difference_type;
+    using reducer_type         = LastLoc<index_type>;
+    using reduction_value_type = typename reducer_type::value_type;
+    using func_t = StdFindEndFunctor<index_type, IteratorType1, IteratorType2,
+                                     reducer_type, BinaryPredicateType>;
+
+    // run
+    reduction_value_type red_result;
+    reducer_type reducer(red_result);
+
+    // decide the size of the range policy for the parallel reduce:
+    // note that the last feasible index to start looking is the index
+    // whose distance from the "last" is equal to the sequence count.
+    // the +1 is because we need to include that location too.
+    const auto range_size = num_elements - s_count + 1;
+
+    // run par reduce
+    ::Kokkos::parallel_reduce(
+        label, RangePolicy<ExecutionSpace>(ex, 0, range_size),
+        func_t(first, last, s_first, s_last, reducer, pred), reducer);
+
+    // fence not needed because reducing into scalar
+
+    // decide and return
+    if (red_result.max_loc_true ==
+        ::Kokkos::reduction_identity<index_type>::max()) {
+      // if here, a subrange has not been found
+      return last;
+    } else {
+      // a location has been found
+      return first + red_result.max_loc_true;
+    }
+  }
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType1 find_end_impl(const std::string& label, const ExecutionSpace& ex,
+                            IteratorType1 first, IteratorType1 last,
+                            IteratorType2 s_first, IteratorType2 s_last) {
+  using value_type1    = typename IteratorType1::value_type;
+  using value_type2    = typename IteratorType2::value_type;
+  using predicate_type = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+  return find_end_impl(label, ex, first, last, s_first, s_last,
+                       predicate_type());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
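
Since the functor records the starting index of every complete match and the LastLoc reducer keeps the largest one, the algorithm returns the beginning of the last occurrence of the subsequence, mirroring std::find_end. A sketch with illustrative host data:

    namespace KE = Kokkos::Experimental;
    Kokkos::View<int*, Kokkos::HostSpace> a("a", 8), s("s", 2);
    const int av[8] = {1, 2, 1, 2, 0, 1, 2, 0};
    for (int i = 0; i < 8; ++i) a(i) = av[i];
    s(0) = 1; s(1) = 2;
    // occurrences of {1,2} start at 0, 2, 5; find_end reports the last one
    auto it = KE::find_end(Kokkos::DefaultHostExecutionSpace(), a, s);
    const auto idx = KE::distance(KE::begin(a), it);  // idx == 5
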
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_FindFirstOf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_FindFirstOf.hpp
new file mode 100644 (file)
index 0000000..6907bbd
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_FIRST_OF_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_FIRST_OF_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class IteratorType1, class IteratorType2,
+          class ReducerType, class PredicateType>
+struct StdFindFirstOfFunctor {
+  using red_value_type = typename ReducerType::value_type;
+
+  IteratorType1 m_first;
+  IteratorType2 m_s_first;
+  IteratorType2 m_s_last;
+  ReducerType m_reducer;
+  PredicateType m_p;
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, red_value_type& red_value) const {
+    namespace KE        = ::Kokkos::Experimental;
+    const auto& myvalue = m_first[i];
+    bool found          = false;
+
+    const auto search_count = KE::distance(m_s_first, m_s_last);
+    for (IndexType k = 0; k < search_count; ++k) {
+      if (m_p(myvalue, m_s_first[k])) {
+        found = true;
+        break;
+      }
+    }
+
+    const auto rv =
+        found ? red_value_type{i}
+              : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
+
+    m_reducer.join(red_value, rv);
+  }
+
+  KOKKOS_FUNCTION
+  StdFindFirstOfFunctor(IteratorType1 first, IteratorType2 s_first,
+                        IteratorType2 s_last, ReducerType reducer,
+                        PredicateType p)
+      : m_first(std::move(first)),
+        m_s_first(std::move(s_first)),
+        m_s_last(std::move(s_last)),
+        m_reducer(std::move(reducer)),
+        m_p(std::move(p)) {}
+};
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+IteratorType1 find_first_of_impl(const std::string& label,
+                                 const ExecutionSpace& ex, IteratorType1 first,
+                                 IteratorType1 last, IteratorType2 s_first,
+                                 IteratorType2 s_last,
+                                 const BinaryPredicateType& pred) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first, s_first);
+  Impl::static_assert_iterators_have_matching_difference_type(first, s_first);
+  Impl::expect_valid_range(first, last);
+  Impl::expect_valid_range(s_first, s_last);
+
+  if ((s_first == s_last) || (first == last)) {
+    return last;
+  }
+
+  using index_type           = typename IteratorType1::difference_type;
+  using reducer_type         = FirstLoc<index_type>;
+  using reduction_value_type = typename reducer_type::value_type;
+  using func_t = StdFindFirstOfFunctor<index_type, IteratorType1, IteratorType2,
+                                       reducer_type, BinaryPredicateType>;
+
+  // run
+  reduction_value_type red_result;
+  reducer_type reducer(red_result);
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_reduce(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+      func_t(first, s_first, s_last, reducer, pred), reducer);
+
+  // fence not needed because reducing into scalar
+
+  // decide and return
+  if (red_result.min_loc_true ==
+      ::Kokkos::reduction_identity<index_type>::min()) {
+    // if here, nothing found
+    return last;
+  } else {
+    // a location has been found
+    return first + red_result.min_loc_true;
+  }
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType1 find_first_of_impl(const std::string& label,
+                                 const ExecutionSpace& ex, IteratorType1 first,
+                                 IteratorType1 last, IteratorType2 s_first,
+                                 IteratorType2 s_last) {
+  using value_type1    = typename IteratorType1::value_type;
+  using value_type2    = typename IteratorType2::value_type;
+  using predicate_type = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+  return find_first_of_impl(label, ex, first, last, s_first, s_last,
+                            predicate_type());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
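
Unlike find_end, each index here only compares its own element against every element of the search range, and the FirstLoc reducer keeps the smallest matching index, so the result is the first element of [first, last) equal to any candidate, as in std::find_first_of. Illustrative sketch:

    namespace KE = Kokkos::Experimental;
    Kokkos::View<int*, Kokkos::HostSpace> a("a", 5), s("s", 2);
    const int av[5] = {7, 8, 3, 9, 4};
    for (int i = 0; i < 5; ++i) a(i) = av[i];
    s(0) = 9; s(1) = 3;
    auto it = KE::find_first_of(Kokkos::DefaultHostExecutionSpace(), a, s);
    // KE::distance(KE::begin(a), it) == 2, since a(2) == 3 appears in s
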
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_FindIfOrNot.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_FindIfOrNot.hpp
new file mode 100644 (file)
index 0000000..c79c4b5
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FIND_IF_AND_FIND_IF_NOT_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_FIND_IF_AND_FIND_IF_NOT_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <bool is_find_if, class IndexType, class IteratorType,
+          class ReducerType, class PredicateType>
+struct StdFindIfOrNotFunctor {
+  using red_value_type = typename ReducerType::value_type;
+
+  IteratorType m_first;
+  ReducerType m_reducer;
+  PredicateType m_p;
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, red_value_type& red_value) const {
+    const auto& my_value = m_first[i];
+
+    // if doing find_if, look for when predicate is true
+    // if doing find_if_not, look for when predicate is false
+    const bool found_condition = is_find_if ? m_p(my_value) : !m_p(my_value);
+
+    auto rv =
+        found_condition
+            ? red_value_type{i}
+            : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
+
+    m_reducer.join(red_value, rv);
+  }
+
+  KOKKOS_FUNCTION
+  StdFindIfOrNotFunctor(IteratorType first, ReducerType reducer,
+                        PredicateType p)
+      : m_first(std::move(first)),
+        m_reducer(std::move(reducer)),
+        m_p(std::move(p)) {}
+};
+
+template <bool is_find_if, class ExecutionSpace, class IteratorType,
+          class PredicateType>
+IteratorType find_if_or_not_impl(const std::string& label,
+                                 const ExecutionSpace& ex, IteratorType first,
+                                 IteratorType last, PredicateType pred) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(
+      ex, first);  // only need one It per type
+  Impl::expect_valid_range(first, last);
+
+  if (first == last) {
+    return last;
+  }
+
+  // aliases
+  using index_type           = typename IteratorType::difference_type;
+  using reducer_type         = FirstLoc<index_type>;
+  using reduction_value_type = typename reducer_type::value_type;
+  using func_t = StdFindIfOrNotFunctor<is_find_if, index_type, IteratorType,
+                                       reducer_type, PredicateType>;
+
+  // run
+  reduction_value_type red_result;
+  reducer_type reducer(red_result);
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_reduce(label,
+                            RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                            func_t(first, reducer, pred), reducer);
+
+  // fence not needed because reducing into scalar
+
+  // decide and return
+  if (red_result.min_loc_true ==
+      ::Kokkos::reduction_identity<index_type>::min()) {
+    // if here, a valid location has not been found
+    return last;
+  } else {
+    // a location has been found
+    return first + red_result.min_loc_true;
+  }
+}
+
+template <class ExecutionSpace, class InputIterator, class T>
+InputIterator find_impl(const std::string& label, ExecutionSpace ex,
+                        InputIterator first, InputIterator last,
+                        const T& value) {
+  return find_if_or_not_impl<true>(
+      label, ex, first, last,
+      ::Kokkos::Experimental::Impl::StdAlgoEqualsValUnaryPredicate<T>(value));
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
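
Note that find_impl is simply find_if with the equals-value unary predicate, while find_if_not reuses the same functor with the condition flipped at compile time through the is_find_if flag. A usage sketch of the public find_if (the predicate must be device-callable, hence KOKKOS_LAMBDA):

    namespace KE = Kokkos::Experimental;
    Kokkos::View<int*> v("v", 100);
    // iterator to the first element strictly greater than 42, or end(v)
    auto it = KE::find_if(
        Kokkos::DefaultExecutionSpace(), v,
        KOKKOS_LAMBDA(const int x) { return x > 42; });
    const bool found = (it != KE::end(v));
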
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ForEachForEachN.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ForEachForEachN.hpp
new file mode 100644 (file)
index 0000000..8bd37b1
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_FOR_EACH_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_FOR_EACH_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class UnaryFunctorType>
+struct StdForEachFunctor {
+  using index_type = typename IteratorType::difference_type;
+  IteratorType m_first;
+  UnaryFunctorType m_functor;
+
+  KOKKOS_FUNCTION
+  void operator()(index_type i) const { m_functor(m_first[i]); }
+
+  KOKKOS_FUNCTION
+  StdForEachFunctor(IteratorType _first, UnaryFunctorType _functor)
+      : m_first(std::move(_first)), m_functor(std::move(_functor)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class UnaryFunctorType>
+UnaryFunctorType for_each_impl(const std::string& label,
+                               const ExecutionSpace& ex, IteratorType first,
+                               IteratorType last, UnaryFunctorType functor) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_for(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+      StdForEachFunctor<IteratorType, UnaryFunctorType>(first, functor));
+  ex.fence("Kokkos::for_each: fence after operation");
+
+  return functor;
+}
+
+template <class ExecutionSpace, class IteratorType, class SizeType,
+          class UnaryFunctorType>
+IteratorType for_each_n_impl(const std::string& label, const ExecutionSpace& ex,
+                             IteratorType first, SizeType n,
+                             UnaryFunctorType functor) {
+  auto last = first + n;
+  Impl::static_assert_random_access_and_accessible(ex, first, last);
+  Impl::expect_valid_range(first, last);
+
+  if (n == 0) {
+    return first;
+  }
+
+  for_each_impl(label, ex, first, last, std::move(functor));
+  // no need to fence since for_each_impl already fences
+
+  return last;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
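
A usage sketch of the public for_each, which applies the functor to each element through the iterator's operator[] and can therefore mutate elements in place (the functor must be device-callable):

    namespace KE = Kokkos::Experimental;
    Kokkos::View<double*> v("v", 10);
    // double every entry in place
    KE::for_each(Kokkos::DefaultExecutionSpace(), v,
                 KOKKOS_LAMBDA(double& x) { x *= 2.0; });
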
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_GenerateGenerateN.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_GenerateGenerateN.hpp
new file mode 100644 (file)
index 0000000..f01d9bf
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_GENERATE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_GENERATE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class Generator>
+struct StdGenerateFunctor {
+  using index_type = typename IteratorType::difference_type;
+  IteratorType m_first;
+  Generator m_generator;
+
+  KOKKOS_FUNCTION
+  void operator()(index_type i) const { m_first[i] = m_generator(); }
+
+  KOKKOS_FUNCTION
+  StdGenerateFunctor(IteratorType _first, Generator _g)
+      : m_first(std::move(_first)), m_generator(std::move(_g)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class Generator>
+void generate_impl(const std::string& label, const ExecutionSpace& ex,
+                   IteratorType first, IteratorType last, Generator g) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  // aliases
+  using func_t = StdGenerateFunctor<IteratorType, Generator>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         func_t(first, g));
+  ex.fence("Kokkos::generate: fence after operation");
+}
+
+template <class ExecutionSpace, class IteratorType, class Size, class Generator>
+IteratorType generate_n_impl(const std::string& label, const ExecutionSpace& ex,
+                             IteratorType first, Size count, Generator g) {
+  if (count <= 0) {
+    return first;
+  }
+
+  generate_impl(label, ex, first, first + count, g);
+  return first + count;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
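
Unlike fill, the generator is invoked once per element and takes no arguments; on device backends it must be callable on the device. Illustrative sketch:

    namespace KE = Kokkos::Experimental;
    Kokkos::View<int*> v("v", 8);
    // every entry becomes 7
    KE::generate(Kokkos::DefaultExecutionSpace(), v,
                 KOKKOS_LAMBDA() { return 7; });
    // only the first 4 entries become 0; returns first + 4
    KE::generate_n(Kokkos::DefaultExecutionSpace(), KE::begin(v), 4,
                   KOKKOS_LAMBDA() { return 0; });
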
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_HelperPredicates.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_HelperPredicates.hpp
new file mode 100644 (file)
index 0000000..244bce4
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_HELPER_PREDICATES_HPP
+#define KOKKOS_STD_ALGORITHMS_HELPER_PREDICATES_HPP
+
+#include <Kokkos_Macros.hpp>
+
+// naming convention:
+// StdAlgoSomeExpressiveNameUnaryPredicate
+// StdAlgoSomeExpressiveNameBinaryPredicate
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+// ------------------
+// UNARY PREDICATES
+// ------------------
+template <class T>
+struct StdAlgoEqualsValUnaryPredicate {
+  T m_value;
+
+  KOKKOS_FUNCTION
+  constexpr bool operator()(const T& val) const { return val == m_value; }
+
+  KOKKOS_FUNCTION
+  constexpr explicit StdAlgoEqualsValUnaryPredicate(const T& _value)
+      : m_value(_value) {}
+};
+
+template <class T>
+struct StdAlgoNotEqualsValUnaryPredicate {
+  T m_value;
+
+  KOKKOS_FUNCTION
+  constexpr bool operator()(const T& val) const { return !(val == m_value); }
+
+  KOKKOS_FUNCTION
+  constexpr explicit StdAlgoNotEqualsValUnaryPredicate(const T& _value)
+      : m_value(_value) {}
+};
+
+template <class ValueType, class PredicateType>
+struct StdAlgoNegateUnaryPredicateWrapper {
+  PredicateType m_pred;
+
+  KOKKOS_FUNCTION
+  constexpr bool operator()(const ValueType& val) const { return !m_pred(val); }
+
+  KOKKOS_FUNCTION
+  constexpr explicit StdAlgoNegateUnaryPredicateWrapper(
+      const PredicateType& pred)
+      : m_pred(pred) {}
+};
+
+// ------------------
+// BINARY PREDICATES
+// ------------------
+template <class ValueType1, class ValueType2 = ValueType1>
+struct StdAlgoEqualBinaryPredicate {
+  KOKKOS_FUNCTION
+  constexpr bool operator()(const ValueType1& a, const ValueType2& b) const {
+    return a == b;
+  }
+};
+
+template <class ValueType1, class ValueType2 = ValueType1>
+struct StdAlgoLessThanBinaryPredicate {
+  KOKKOS_FUNCTION
+  constexpr bool operator()(const ValueType1& a, const ValueType2& b) const {
+    return a < b;
+  }
+};
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+#endif
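
These predicates are the default function objects plugged into the algorithms above; for instance, the iterator-only find_end and find_first_of overloads default to StdAlgoEqualBinaryPredicate, and a find_if_not-style call can be built from find_if plus the negate wrapper. A host-side sketch of how they compose (internal Impl names, shown only for illustration):

    using Kokkos::Experimental::Impl::StdAlgoEqualsValUnaryPredicate;
    using Kokkos::Experimental::Impl::StdAlgoNegateUnaryPredicateWrapper;

    StdAlgoEqualsValUnaryPredicate<int> eq5(5);
    StdAlgoNegateUnaryPredicateWrapper<int, decltype(eq5)> not_eq5(eq5);
    // eq5(5) == true, not_eq5(5) == false
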
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_IdentityReferenceUnaryFunctor.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_IdentityReferenceUnaryFunctor.hpp
new file mode 100644 (file)
index 0000000..f41e567
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_NUMERIC_IDENTITY_REFERENCE_UNARY_FUNCTOR_HPP
+#define KOKKOS_STD_ALGORITHMS_NUMERIC_IDENTITY_REFERENCE_UNARY_FUNCTOR_HPP
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ValueType>
+struct StdNumericScanIdentityReferenceUnaryFunctor {
+  KOKKOS_FUNCTION
+  constexpr const ValueType& operator()(const ValueType& a) const { return a; }
+};
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_InclusiveScan.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_InclusiveScan.hpp
new file mode 100644 (file)
index 0000000..2088ebd
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_INCLUSIVE_SCAN_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_INCLUSIVE_SCAN_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_TransformInclusiveScan.hpp>
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <typename ValueType>
+using in_scan_has_reduction_identity_sum_t =
+    decltype(Kokkos::reduction_identity<ValueType>::sum());
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+          class FirstDest>
+struct InclusiveScanDefaultFunctorForKnownIdentityElement {
+  using execution_space = ExeSpace;
+
+  FirstFrom m_first_from;
+  FirstDest m_first_dest;
+
+  KOKKOS_FUNCTION
+  InclusiveScanDefaultFunctorForKnownIdentityElement(FirstFrom first_from,
+                                                     FirstDest first_dest)
+      : m_first_from(std::move(first_from)),
+        m_first_dest(std::move(first_dest)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, ValueType& update,
+                  const bool final_pass) const {
+    update += m_first_from[i];
+
+    if (final_pass) {
+      m_first_dest[i] = update;
+    }
+  }
+};
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+          class FirstDest>
+struct InclusiveScanDefaultFunctor {
+  using execution_space = ExeSpace;
+  using value_type      = ValueWrapperForNoNeutralElement<ValueType>;
+
+  FirstFrom m_first_from;
+  FirstDest m_first_dest;
+
+  KOKKOS_FUNCTION
+  InclusiveScanDefaultFunctor(FirstFrom first_from, FirstDest first_dest)
+      : m_first_from(std::move(first_from)),
+        m_first_dest(std::move(first_dest)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, value_type& update,
+                  const bool final_pass) const {
+    const auto tmp = value_type{m_first_from[i], false};
+    this->join(update, tmp);
+
+    if (final_pass) {
+      m_first_dest[i] = update.val;
+    }
+  }
+
+  KOKKOS_FUNCTION
+  void init(value_type& update) const {
+    update.val        = {};
+    update.is_initial = true;
+  }
+
+  KOKKOS_FUNCTION
+  void join(value_type& update, const value_type& input) const {
+    if (update.is_initial) {
+      update.val = input.val;
+    } else {
+      update.val = update.val + input.val;
+    }
+    update.is_initial = false;
+  }
+};
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType>
+OutputIteratorType inclusive_scan_default_op_impl(
+    const std::string& label, const ExecutionSpace& ex,
+    InputIteratorType first_from, InputIteratorType last_from,
+    OutputIteratorType first_dest) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+  Impl::static_assert_iterators_have_matching_difference_type(first_from,
+                                                              first_dest);
+  Impl::expect_valid_range(first_from, last_from);
+
+  // aliases
+  using index_type = typename InputIteratorType::difference_type;
+  using value_type =
+      std::remove_const_t<typename InputIteratorType::value_type>;
+  using func_type = std::conditional_t<
+      ::Kokkos::is_detected<in_scan_has_reduction_identity_sum_t,
+                            value_type>::value,
+      InclusiveScanDefaultFunctorForKnownIdentityElement<
+          ExecutionSpace, index_type, value_type, InputIteratorType,
+          OutputIteratorType>,
+      InclusiveScanDefaultFunctor<ExecutionSpace, index_type, value_type,
+                                  InputIteratorType, OutputIteratorType>>;
+
+  // run
+  const auto num_elements =
+      Kokkos::Experimental::distance(first_from, last_from);
+  ::Kokkos::parallel_scan(label,
+                          RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                          func_type(first_from, first_dest));
+  ex.fence("Kokkos::inclusive_scan_default_op: fence after operation");
+
+  // return
+  return first_dest + num_elements;
+}
+
+// -------------------------------------------------------------
+// inclusive_scan_custom_binary_op_impl
+// -------------------------------------------------------------
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOpType>
+OutputIteratorType inclusive_scan_custom_binary_op_impl(
+    const std::string& label, const ExecutionSpace& ex,
+    InputIteratorType first_from, InputIteratorType last_from,
+    OutputIteratorType first_dest, BinaryOpType binary_op) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+  Impl::static_assert_iterators_have_matching_difference_type(first_from,
+                                                              first_dest);
+  Impl::expect_valid_range(first_from, last_from);
+
+  // aliases
+  using index_type = typename InputIteratorType::difference_type;
+  using value_type =
+      std::remove_const_t<typename InputIteratorType::value_type>;
+  using unary_op_type = StdNumericScanIdentityReferenceUnaryFunctor<value_type>;
+  using func_type     = TransformInclusiveScanNoInitValueFunctor<
+      ExecutionSpace, index_type, value_type, InputIteratorType,
+      OutputIteratorType, BinaryOpType, unary_op_type>;
+
+  // run
+  const auto num_elements =
+      Kokkos::Experimental::distance(first_from, last_from);
+  ::Kokkos::parallel_scan(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+      func_type(first_from, first_dest, binary_op, unary_op_type()));
+  ex.fence("Kokkos::inclusive_scan_custom_binary_op: fence after operation");
+
+  // return
+  return first_dest + num_elements;
+}
+
+// -------------------------------------------------------------
+// inclusive_scan_custom_binary_op_impl with init_value
+// -------------------------------------------------------------
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOpType, class ValueType>
+OutputIteratorType inclusive_scan_custom_binary_op_impl(
+    const std::string& label, const ExecutionSpace& ex,
+    InputIteratorType first_from, InputIteratorType last_from,
+    OutputIteratorType first_dest, BinaryOpType binary_op,
+    ValueType init_value) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+  Impl::static_assert_iterators_have_matching_difference_type(first_from,
+                                                              first_dest);
+  Impl::expect_valid_range(first_from, last_from);
+
+  // aliases
+  using index_type    = typename InputIteratorType::difference_type;
+  using unary_op_type = StdNumericScanIdentityReferenceUnaryFunctor<ValueType>;
+  using func_type     = TransformInclusiveScanWithInitValueFunctor<
+      ExecutionSpace, index_type, ValueType, InputIteratorType,
+      OutputIteratorType, BinaryOpType, unary_op_type>;
+
+  // run
+  const auto num_elements =
+      Kokkos::Experimental::distance(first_from, last_from);
+  ::Kokkos::parallel_scan(label,
+                          RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                          func_type(first_from, first_dest, binary_op,
+                                    unary_op_type(), init_value));
+  ex.fence("Kokkos::inclusive_scan_custom_binary_op: fence after operation");
+
+  // return
+  return first_dest + num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
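
The ValueWrapperForNoNeutralElement path above is what lets the default-op scan work for value types without a known reduction_identity: init() marks the partial result as uninitialized, and join() then treats the first contribution as an assignment rather than a sum. A minimal usage sketch of the default-op overload (illustrative names):

    namespace KE = Kokkos::Experimental;
    Kokkos::View<int*> in("in", 4), out("out", 4);
    KE::fill(Kokkos::DefaultExecutionSpace(), in, 2);
    // default-op inclusive scan -> out = {2, 4, 6, 8}
    KE::inclusive_scan(Kokkos::DefaultExecutionSpace(), in, out);
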
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_IsPartitioned.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_IsPartitioned.hpp
new file mode 100644 (file)
index 0000000..0f00beb
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_IS_PARTITIONED_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_IS_PARTITIONED_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class ReducerType, class PredicateType>
+struct StdIsPartitionedFunctor {
+  using red_value_type = typename ReducerType::value_type;
+  using index_type     = typename IteratorType::difference_type;
+
+  IteratorType m_first;
+  ReducerType m_reducer;
+  PredicateType m_p;
+
+  KOKKOS_FUNCTION
+  void operator()(const index_type i, red_value_type& redValue) const {
+    const auto predicate_value = m_p(m_first[i]);
+    constexpr index_type m_red_id_min =
+        ::Kokkos::reduction_identity<index_type>::min();
+    constexpr index_type m_red_id_max =
+        ::Kokkos::reduction_identity<index_type>::max();
+    auto rv = predicate_value ? red_value_type{i, m_red_id_min}
+                              : red_value_type{m_red_id_max, i};
+
+    m_reducer.join(redValue, rv);
+  }
+
+  KOKKOS_FUNCTION
+  StdIsPartitionedFunctor(IteratorType first, ReducerType reducer,
+                          PredicateType p)
+      : m_first(std::move(first)),
+        m_reducer(std::move(reducer)),
+        m_p(std::move(p)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class PredicateType>
+bool is_partitioned_impl(const std::string& label, const ExecutionSpace& ex,
+                         IteratorType first, IteratorType last,
+                         PredicateType pred) {
+  // true if all elements in the range [first, last) that satisfy
+  // the predicate "pred" appear before all elements that don't.
+  // Also returns true if [first, last) is empty
+  // or if all elements satisfy the predicate.
+
+  // we implement it by finding:
+  // - the max location where predicate is true  (max_loc_true)
+  // - the min location where predicate is false (min_loc_false)
+  // so the range is partitioned if max_loc_true < min_loc_false
+
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  // trivial case
+  if (first == last) {
+    return true;
+  }
+
+  // aliases
+  using index_type           = typename IteratorType::difference_type;
+  using reducer_type         = StdIsPartitioned<index_type>;
+  using reduction_value_type = typename reducer_type::value_type;
+  using func_t =
+      StdIsPartitionedFunctor<IteratorType, reducer_type, PredicateType>;
+
+  // run
+  reduction_value_type red_result;
+  reducer_type reducer(red_result);
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_reduce(label,
+                            RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                            func_t(first, reducer, pred), reducer);
+
+  // fence not needed because reducing into scalar
+
+  // decide and return
+  constexpr index_type red_id_min =
+      ::Kokkos::reduction_identity<index_type>::min();
+  constexpr index_type red_id_max =
+      ::Kokkos::reduction_identity<index_type>::max();
+
+  if (red_result.max_loc_true != red_id_max &&
+      red_result.min_loc_false != red_id_min) {
+    return red_result.max_loc_true < red_result.min_loc_false;
+  } else if (first + red_result.max_loc_true == --last) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
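
A usage sketch of the public is_partitioned (the predicate must be device-callable; the data are illustrative):

    namespace KE = Kokkos::Experimental;
    Kokkos::View<int*, Kokkos::HostSpace> v("v", 6);
    const int vals[6] = {2, 4, 6, 1, 3, 5};  // all evens before all odds
    for (int i = 0; i < 6; ++i) v(i) = vals[i];
    const bool ok = KE::is_partitioned(
        Kokkos::DefaultHostExecutionSpace(), v,
        KOKKOS_LAMBDA(const int x) { return x % 2 == 0; });  // ok == true
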
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_IsSorted.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_IsSorted.hpp
new file mode 100644 (file)
index 0000000..4e36ae3
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_IS_SORTED_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_IS_SORTED_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class ComparatorType>
+struct StdIsSortedFunctor {
+  using index_type = typename IteratorType::difference_type;
+  IteratorType m_first;
+  ComparatorType m_comparator;
+
+  KOKKOS_FUNCTION
+  void operator()(const index_type i, std::size_t& update) const {
+    const auto& val_i   = m_first[i];
+    const auto& val_ip1 = m_first[i + 1];
+
+    if (m_comparator(val_ip1, val_i)) {
+      ++update;
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdIsSortedFunctor(IteratorType _first1, ComparatorType comparator)
+      : m_first(std::move(_first1)), m_comparator(std::move(comparator)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+bool is_sorted_impl(const std::string& label, const ExecutionSpace& ex,
+                    IteratorType first, IteratorType last,
+                    ComparatorType comp) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  if (num_elements <= 1) {
+    return true;
+  }
+
+  // use num_elements-1 because each index handles i and i+1
+  const auto num_elements_minus_one = num_elements - 1;
+  using functor_type = StdIsSortedFunctor<IteratorType, ComparatorType>;
+
+  // result is incremented by one if sorting breaks at index i
+  std::size_t result = 0;
+  ::Kokkos::parallel_reduce(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_minus_one),
+      functor_type(first, std::move(comp)), result);
+
+  return result == 0;
+}
+
+template <class ExecutionSpace, class IteratorType>
+bool is_sorted_impl(const std::string& label, const ExecutionSpace& ex,
+                    IteratorType first, IteratorType last) {
+  using value_type = typename IteratorType::value_type;
+  using pred_t     = Impl::StdAlgoLessThanBinaryPredicate<value_type>;
+  return is_sorted_impl(label, ex, first, last, pred_t());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
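
A minimal usage sketch for the reduction-based implementation above, assuming the public Kokkos::Experimental::is_sorted overloads declared in std_algorithms/Kokkos_IsSorted.hpp (view name and execution space are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    bool check_sorted(const Kokkos::View<double*>& v) {
      // the overload without a comparator falls back to the less-than
      // predicate, as in the second is_sorted_impl overload above
      return Kokkos::Experimental::is_sorted(Kokkos::DefaultExecutionSpace(), v);
    }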
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_IsSortedUntil.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_IsSortedUntil.hpp
new file mode 100644 (file)
index 0000000..4e99c30
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_IS_SORTED_UNTIL_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_IS_SORTED_UNTIL_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <std_algorithms/Kokkos_Find.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class IndicatorViewType, class ComparatorType>
+struct StdIsSortedUntilFunctor {
+  using index_type = typename IteratorType::difference_type;
+  IteratorType m_first;
+  IndicatorViewType m_indicator;
+  ComparatorType m_comparator;
+
+  KOKKOS_FUNCTION
+  void operator()(const index_type i, int& update, const bool final) const {
+    const auto& val_i   = m_first[i];
+    const auto& val_ip1 = m_first[i + 1];
+
+    if (m_comparator(val_ip1, val_i)) {
+      ++update;
+    }
+
+    if (final) {
+      m_indicator(i) = update;
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdIsSortedUntilFunctor(IteratorType _first1, IndicatorViewType indicator,
+                          ComparatorType comparator)
+      : m_first(std::move(_first1)),
+        m_indicator(std::move(indicator)),
+        m_comparator(std::move(comparator)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class ComparatorType>
+IteratorType is_sorted_until_impl(const std::string& label,
+                                  const ExecutionSpace& ex, IteratorType first,
+                                  IteratorType last, ComparatorType comp) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+
+  // trivial case
+  if (num_elements <= 1) {
+    return last;
+  }
+
+  /*
+    Use a parallel scan with a helper "indicator" view: the partial sum
+    written into the indicator stays 0 for every index at which the
+    ordering holds, and becomes 1 starting at the first index where a
+    pair breaks the sorting. Finding the first 1 in the indicator
+    therefore yields the location we want.
+   */
+
+  // aliases
+  using indicator_value_type = std::size_t;
+  using indicator_view_type =
+      ::Kokkos::View<indicator_value_type*, ExecutionSpace>;
+  using functor_type =
+      StdIsSortedUntilFunctor<IteratorType, indicator_view_type,
+                              ComparatorType>;
+
+  // do scan
+  // use num_elements-1 because each index handles i and i+1
+  const auto num_elements_minus_one = num_elements - 1;
+  indicator_view_type indicator("is_sorted_until_indicator_helper",
+                                num_elements_minus_one);
+  ::Kokkos::parallel_scan(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_minus_one),
+      functor_type(first, indicator, std::move(comp)));
+
+  // try to find the first sentinel value, which indicates
+  // where the sorting condition breaks
+  namespace KE                                  = ::Kokkos::Experimental;
+  constexpr indicator_value_type sentinel_value = 1;
+  auto r =
+      KE::find(ex, KE::cbegin(indicator), KE::cend(indicator), sentinel_value);
+  const auto shift = r - ::Kokkos::Experimental::cbegin(indicator);
+
+  return first + (shift + 1);
+}
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType is_sorted_until_impl(const std::string& label,
+                                  const ExecutionSpace& ex, IteratorType first,
+                                  IteratorType last) {
+  using value_type = typename IteratorType::value_type;
+  using pred_t     = Impl::StdAlgoLessThanBinaryPredicate<value_type>;
+  return is_sorted_until_impl(label, ex, first, last, pred_t());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
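
A minimal sketch of the scan-based algorithm above as seen from user code, assuming the public Kokkos::Experimental::is_sorted_until iterator overload declared in std_algorithms/Kokkos_IsSortedUntil.hpp (names are illustrative):

    #include <cstddef>
    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // length of the longest sorted prefix of v
    std::ptrdiff_t sorted_prefix_length(const Kokkos::View<int*>& v) {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      // returns the iterator one past the last element of the sorted prefix
      auto it = KE::is_sorted_until(exec, KE::begin(v), KE::end(v));
      return it - KE::begin(v);
    }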
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_LexicographicalCompare.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_LexicographicalCompare.hpp
new file mode 100644 (file)
index 0000000..c3dd13e
--- /dev/null
@@ -0,0 +1,184 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_LEXICOGRAPHICAL_COMPARE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_LEXICOGRAPHICAL_COMPARE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class IteratorType1, class IteratorType2,
+          class ComparatorType>
+struct StdCompareFunctor {
+  IteratorType1 m_it1;
+  IteratorType2 m_it2;
+  ComparatorType m_predicate;
+
+  KOKKOS_FUNCTION
+  void operator()(IndexType /* i is unused */, int& lsum) const {
+    if (m_predicate(*m_it1, *m_it2)) {
+      lsum = 1;
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdCompareFunctor(IteratorType1 _it1, IteratorType2 _it2,
+                    ComparatorType _predicate)
+      : m_it1(std::move(_it1)),
+        m_it2(std::move(_it2)),
+        m_predicate(std::move(_predicate)) {}
+};
+
+template <class IndexType, class IteratorType1, class IteratorType2,
+          class ReducerType, class ComparatorType>
+struct StdLexicographicalCompareFunctor {
+  using red_value_type = typename ReducerType::value_type;
+  IteratorType1 m_first1;
+  IteratorType2 m_first2;
+  ReducerType m_reducer;
+  ComparatorType m_comparator;
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, red_value_type& red_value) const {
+    const auto& my_value1 = m_first1[i];
+    const auto& my_value2 = m_first2[i];
+
+    bool different = m_comparator(my_value1, my_value2) ||
+                     m_comparator(my_value2, my_value1);
+    auto rv =
+        different
+            ? red_value_type{i}
+            : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
+
+    m_reducer.join(red_value, rv);
+  }
+
+  KOKKOS_FUNCTION
+  StdLexicographicalCompareFunctor(IteratorType1 _first1, IteratorType2 _first2,
+                                   ReducerType _reducer, ComparatorType _comp)
+      : m_first1(std::move(_first1)),
+        m_first2(std::move(_first2)),
+        m_reducer(std::move(_reducer)),
+        m_comparator(std::move(_comp)) {}
+};
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class ComparatorType>
+bool lexicographical_compare_impl(const std::string& label,
+                                  const ExecutionSpace& ex,
+                                  IteratorType1 first1, IteratorType1 last1,
+                                  IteratorType2 first2, IteratorType2 last2,
+                                  ComparatorType comp) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first1, first2);
+  Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+  Impl::expect_valid_range(first1, last1);
+  Impl::expect_valid_range(first2, last2);
+
+  // aliases
+  using index_type           = typename IteratorType1::difference_type;
+  using reducer_type         = FirstLoc<index_type>;
+  using reduction_value_type = typename reducer_type::value_type;
+
+  // run
+  const auto d1    = Kokkos::Experimental::distance(first1, last1);
+  const auto d2    = Kokkos::Experimental::distance(first2, last2);
+  const auto range = Kokkos::min(d1, d2);
+  reduction_value_type red_result;
+  reducer_type reducer(red_result);
+  using func1_t =
+      StdLexicographicalCompareFunctor<index_type, IteratorType1, IteratorType2,
+                                       reducer_type, ComparatorType>;
+
+  ::Kokkos::parallel_reduce(label, RangePolicy<ExecutionSpace>(ex, 0, range),
+                            func1_t(first1, first2, reducer, comp), reducer);
+
+  // fence not needed because reducing into scalar
+  // no mismatch
+  if (red_result.min_loc_true ==
+      ::Kokkos::reduction_identity<index_type>::min()) {
+    auto new_last1 = first1 + range;
+    auto new_last2 = first2 + range;
+    bool is_prefix = (new_last1 == last1) && (new_last2 != last2);
+    return is_prefix;
+  }
+
+  // compare the values at the first mismatch
+  int less      = 0;
+  auto it1      = first1 + red_result.min_loc_true;
+  auto it2      = first2 + red_result.min_loc_true;
+  using func2_t = StdCompareFunctor<index_type, IteratorType1, IteratorType2,
+                                    ComparatorType>;
+  ::Kokkos::parallel_reduce(label, RangePolicy<ExecutionSpace>(ex, 0, 1),
+                            func2_t(it1, it2, comp), less);
+
+  // fence not needed because reducing into scalar
+  return static_cast<bool>(less);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+bool lexicographical_compare_impl(const std::string& label,
+                                  const ExecutionSpace& ex,
+                                  IteratorType1 first1, IteratorType1 last1,
+                                  IteratorType2 first2, IteratorType2 last2) {
+  using value_type_1 = typename IteratorType1::value_type;
+  using value_type_2 = typename IteratorType2::value_type;
+  using predicate_t =
+      Impl::StdAlgoLessThanBinaryPredicate<value_type_1, value_type_2>;
+  return lexicographical_compare_impl(label, ex, first1, last1, first2, last2,
+                                      predicate_t());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
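
A minimal sketch of the two-phase implementation above (find the first mismatch, then compare the mismatched pair) from the caller's side, assuming the public Kokkos::Experimental::lexicographical_compare overload declared in std_algorithms/Kokkos_LexicographicalCompare.hpp (names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // true iff a compares lexicographically less than b
    bool lex_less(const Kokkos::View<int*>& a, const Kokkos::View<int*>& b) {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      return KE::lexicographical_compare(exec, KE::cbegin(a), KE::cend(a),
                                         KE::cbegin(b), KE::cend(b));
    }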
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_MinMaxMinmaxElement.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_MinMaxMinmaxElement.hpp
new file mode 100644 (file)
index 0000000..0a9d41b
--- /dev/null
@@ -0,0 +1,167 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_MIN_MAX_MINMAX_ELEMENT_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_MIN_MAX_MINMAX_ELEMENT_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class ReducerType>
+struct StdMinOrMaxElemFunctor {
+  using index_type     = typename IteratorType::difference_type;
+  using red_value_type = typename ReducerType::value_type;
+
+  IteratorType m_first;
+  ReducerType m_reducer;
+
+  KOKKOS_FUNCTION
+  void operator()(const index_type i, red_value_type& red_value) const {
+    m_reducer.join(red_value, red_value_type{m_first[i], i});
+  }
+
+  KOKKOS_FUNCTION
+  StdMinOrMaxElemFunctor(IteratorType first, ReducerType reducer)
+      : m_first(std::move(first)), m_reducer(std::move(reducer)) {}
+};
+
+template <class IteratorType, class ReducerType>
+struct StdMinMaxElemFunctor {
+  using index_type     = typename IteratorType::difference_type;
+  using red_value_type = typename ReducerType::value_type;
+  IteratorType m_first;
+  ReducerType m_reducer;
+
+  KOKKOS_FUNCTION
+  void operator()(const index_type i, red_value_type& red_value) const {
+    const auto& my_value = m_first[i];
+    m_reducer.join(red_value, red_value_type{my_value, my_value, i, i});
+  }
+
+  KOKKOS_FUNCTION
+  StdMinMaxElemFunctor(IteratorType first, ReducerType reducer)
+      : m_first(std::move(first)), m_reducer(std::move(reducer)) {}
+};
+
+template <template <class... Args> class ReducerType, class ExecutionSpace,
+          class IteratorType, class... Args>
+IteratorType min_or_max_element_impl(const std::string& label,
+                                     const ExecutionSpace& ex,
+                                     IteratorType first, IteratorType last,
+                                     Args&&... args) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  if (first == last) {
+    return last;
+  }
+
+  // aliases
+  using index_type           = typename IteratorType::difference_type;
+  using value_type           = typename IteratorType::value_type;
+  using reducer_type         = ReducerType<value_type, index_type, Args...>;
+  using reduction_value_type = typename reducer_type::value_type;
+  using func_t = StdMinOrMaxElemFunctor<IteratorType, reducer_type>;
+
+  // run
+  reduction_value_type red_result;
+  reducer_type reducer(red_result, std::forward<Args>(args)...);
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_reduce(label,
+                            RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                            func_t(first, reducer), reducer);
+
+  // fence not needed because reducing into scalar
+
+  // return
+  return first + red_result.loc;
+}
+
+template <template <class... Args> class ReducerType, class ExecutionSpace,
+          class IteratorType, class... Args>
+::Kokkos::pair<IteratorType, IteratorType> minmax_element_impl(
+    const std::string& label, const ExecutionSpace& ex, IteratorType first,
+    IteratorType last, Args&&... args) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  if (first == last) {
+    return {first, first};
+  }
+
+  // aliases
+  using index_type           = typename IteratorType::difference_type;
+  using value_type           = typename IteratorType::value_type;
+  using reducer_type         = ReducerType<value_type, index_type, Args...>;
+  using reduction_value_type = typename reducer_type::value_type;
+  using func_t               = StdMinMaxElemFunctor<IteratorType, reducer_type>;
+
+  // run
+  reduction_value_type red_result;
+  reducer_type reducer(red_result, std::forward<Args>(args)...);
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_reduce(label,
+                            RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                            func_t(first, reducer), reducer);
+
+  // fence not needed because reducing into scalar
+
+  // return
+  return {first + red_result.min_loc, first + red_result.max_loc};
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
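
A minimal sketch of how the single-pass reducers above surface in user code, assuming the public Kokkos::Experimental::min_element and minmax_element overloads declared in std_algorithms/Kokkos_MinMaxMinmaxElement.hpp (names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void min_max_demo(const Kokkos::View<double*>& v) {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      auto it_min = KE::min_element(exec, v);    // iterator to a smallest element
      auto mm     = KE::minmax_element(exec, v); // Kokkos::pair of iterators
      // offsets relative to the beginning of the view
      const auto min_pos = it_min - KE::begin(v);
      const auto max_pos = mm.second - KE::begin(v);
      (void)min_pos;
      (void)max_pos;
    }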
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Mismatch.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Mismatch.hpp
new file mode 100644 (file)
index 0000000..180afe9
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_MISMATCH_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_MISMATCH_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class IteratorType1, class IteratorType2,
+          class ReducerType, class BinaryPredicateType>
+struct StdMismatchRedFunctor {
+  using red_value_type = typename ReducerType::value_type;
+
+  IteratorType1 m_first1;
+  IteratorType2 m_first2;
+  ReducerType m_reducer;
+  BinaryPredicateType m_predicate;
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, red_value_type& red_value) const {
+    const auto& my_value1 = m_first1[i];
+    const auto& my_value2 = m_first2[i];
+
+    auto rv =
+        !m_predicate(my_value1, my_value2)
+            ? red_value_type{i}
+            : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
+
+    m_reducer.join(red_value, rv);
+  }
+
+  KOKKOS_FUNCTION
+  StdMismatchRedFunctor(IteratorType1 first1, IteratorType2 first2,
+                        ReducerType reducer, BinaryPredicateType predicate)
+      : m_first1(std::move(first1)),
+        m_first2(std::move(first2)),
+        m_reducer(std::move(reducer)),
+        m_predicate(std::move(predicate)) {}
+};
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+::Kokkos::pair<IteratorType1, IteratorType2> mismatch_impl(
+    const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+    IteratorType1 last1, IteratorType2 first2, IteratorType2 last2,
+    BinaryPredicateType predicate) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first1, first2);
+  Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+  Impl::expect_valid_range(first1, last1);
+  Impl::expect_valid_range(first2, last2);
+
+  // aliases
+  using return_type          = ::Kokkos::pair<IteratorType1, IteratorType2>;
+  using index_type           = typename IteratorType1::difference_type;
+  using reducer_type         = FirstLoc<index_type>;
+  using reduction_value_type = typename reducer_type::value_type;
+  using functor_type =
+      StdMismatchRedFunctor<index_type, IteratorType1, IteratorType2,
+                            reducer_type, BinaryPredicateType>;
+
+  // trivial case: handling this explicitly is important;
+  // on OpenMPTarget, omitting the special handling of
+  // the trivial case produced incorrect results.
+  const auto num_e1 = last1 - first1;
+  const auto num_e2 = last2 - first2;
+  if (num_e1 == 0 || num_e2 == 0) {
+    return return_type(first1, first2);
+  }
+
+  // run
+  const auto num_elemen_par_reduce = (num_e1 <= num_e2) ? num_e1 : num_e2;
+  reduction_value_type red_result;
+  reducer_type reducer(red_result);
+  ::Kokkos::parallel_reduce(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elemen_par_reduce),
+      functor_type(first1, first2, reducer, std::move(predicate)), reducer);
+
+  // fence not needed because reducing into scalar
+
+  // decide and return
+  constexpr auto red_min = ::Kokkos::reduction_identity<index_type>::min();
+  if (red_result.min_loc_true == red_min) {
+    // reaching this branch means no mismatch was found
+    if (num_e1 == num_e2) {
+      return return_type(last1, last2);
+    } else if (num_e1 < num_e2) {
+      return return_type(last1, first2 + num_e1);
+    } else {
+      return return_type(first1 + num_e2, last2);
+    }
+  } else {
+    // reaching this branch means a mismatch was found
+    return return_type(first1 + red_result.min_loc_true,
+                       first2 + red_result.min_loc_true);
+  }
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+::Kokkos::pair<IteratorType1, IteratorType2> mismatch_impl(
+    const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+    IteratorType1 last1, IteratorType2 first2, IteratorType2 last2) {
+  using value_type1 = typename IteratorType1::value_type;
+  using value_type2 = typename IteratorType2::value_type;
+  using pred_t      = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+  return mismatch_impl(label, ex, first1, last1, first2, last2, pred_t());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
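
A minimal caller-side sketch of the FirstLoc reduction above, assuming the public Kokkos::Experimental::mismatch overload declared in std_algorithms/Kokkos_Mismatch.hpp (names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void mismatch_demo(const Kokkos::View<int*>& a, const Kokkos::View<int*>& b) {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      auto p = KE::mismatch(exec, KE::begin(a), KE::end(a),
                            KE::begin(b), KE::end(b));
      // p.first and p.second point at the first mismatched pair, or past
      // the end of the shorter range when no mismatch exists
      (void)p;
    }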
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Move.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Move.hpp
new file mode 100644 (file)
index 0000000..6b1ed1d
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_MOVE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_MOVE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class InputIterator, class OutputIterator>
+struct StdMoveFunctor {
+  InputIterator m_first;
+  OutputIterator m_dest_first;
+
+  KOKKOS_FUNCTION
+  void operator()(IndexType i) const {
+    m_dest_first[i] = std::move(m_first[i]);
+  }
+
+  StdMoveFunctor(InputIterator _first, OutputIterator _dest_first)
+      : m_first(std::move(_first)), m_dest_first(std::move(_dest_first)) {}
+};
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator move_impl(const std::string& label, const ExecutionSpace& ex,
+                         InputIterator first, InputIterator last,
+                         OutputIterator d_first) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first, d_first);
+  Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+  Impl::expect_valid_range(first, last);
+
+  // aliases
+  using index_type = typename InputIterator::difference_type;
+  using func_t     = StdMoveFunctor<index_type, InputIterator, OutputIterator>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         func_t(first, d_first));
+  ex.fence("Kokkos::move: fence after operation");
+
+  // return
+  return d_first + num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
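
A minimal sketch of the parallel_for-based move above, assuming the public Kokkos::Experimental::move overload declared in std_algorithms/Kokkos_Move.hpp; dst is assumed to have at least as many elements as src:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void move_demo(const Kokkos::View<int*>& src, const Kokkos::View<int*>& dst) {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      // the implementation fences before returning, so dst is ready to use
      auto d_end = KE::move(exec, KE::begin(src), KE::end(src), KE::begin(dst));
      (void)d_end;  // one past the last element written
    }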
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_MoveBackward.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_MoveBackward.hpp
new file mode 100644 (file)
index 0000000..c34ab67
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_MOVE_BACKWARD_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_MOVE_BACKWARD_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class IteratorType1, class IteratorType2>
+struct StdMoveBackwardFunctor {
+  static_assert(std::is_signed<IndexType>::value,
+                "Kokkos: StdMoveBackwardFunctor requires signed index type");
+
+  IteratorType1 m_last;
+  IteratorType2 m_dest_last;
+
+  KOKKOS_FUNCTION
+  void operator()(IndexType i) const {
+    m_dest_last[-i - 1] = std::move(m_last[-i - 1]);
+  }
+
+  StdMoveBackwardFunctor(IteratorType1 _last, IteratorType2 _dest_last)
+      : m_last(std::move(_last)), m_dest_last(std::move(_dest_last)) {}
+};
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType2 move_backward_impl(const std::string& label,
+                                 const ExecutionSpace& ex, IteratorType1 first,
+                                 IteratorType1 last, IteratorType2 d_last) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first, d_last);
+  Impl::static_assert_iterators_have_matching_difference_type(first, d_last);
+  Impl::expect_valid_range(first, last);
+
+  // aliases
+  using index_type = typename IteratorType1::difference_type;
+  using func_t =
+      StdMoveBackwardFunctor<index_type, IteratorType1, IteratorType2>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         func_t(last, d_last));
+  ex.fence("Kokkos::move_backward: fence after operation");
+
+  // return
+  return d_last - num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
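
A minimal sketch for the backward variant above, which writes from the end of the destination using the negative-offset functor; it assumes the public Kokkos::Experimental::move_backward overload declared in std_algorithms/Kokkos_MoveBackward.hpp and a dst at least as large as src:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void move_backward_demo(const Kokkos::View<int*>& src,
                            const Kokkos::View<int*>& dst) {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      auto d_first = KE::move_backward(exec, KE::begin(src), KE::end(src),
                                       KE::end(dst));
      (void)d_first;  // points at the first element written
    }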
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_PartitionCopy.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_PartitionCopy.hpp
new file mode 100644 (file)
index 0000000..508e4ba
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_PARTITION_COPY_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_PARTITION_COPY_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ValueType>
+struct StdPartitionCopyScalar {
+  ValueType true_count_;
+  ValueType false_count_;
+
+  // Here we implement the copy assignment operators explicitly for consistency
+  // with how the Scalar structs are implemented inside
+  // Kokkos_Parallel_Reduce.hpp.
+  KOKKOS_FUNCTION
+  void operator=(const StdPartitionCopyScalar& other) {
+    true_count_  = other.true_count_;
+    false_count_ = other.false_count_;
+  }
+
+  // this overload is needed to avoid the following error on OpenMPTarget:
+  // OpenMPTarget/Kokkos_OpenMPTarget_Parallel.hpp:699:21: error: no viable
+  // overloaded '=' m_returnvalue = 0;
+  //
+  KOKKOS_FUNCTION
+  void operator=(const ValueType value) {
+    true_count_  = value;
+    false_count_ = value;
+  }
+};
+
+template <class IndexType, class FirstFrom, class FirstDestTrue,
+          class FirstDestFalse, class PredType>
+struct StdPartitionCopyFunctor {
+  using value_type = StdPartitionCopyScalar<IndexType>;
+
+  FirstFrom m_first_from;
+  FirstDestTrue m_first_dest_true;
+  FirstDestFalse m_first_dest_false;
+  PredType m_pred;
+
+  KOKKOS_FUNCTION
+  StdPartitionCopyFunctor(FirstFrom first_from, FirstDestTrue first_dest_true,
+                          FirstDestFalse first_dest_false, PredType pred)
+      : m_first_from(std::move(first_from)),
+        m_first_dest_true(std::move(first_dest_true)),
+        m_first_dest_false(std::move(first_dest_false)),
+        m_pred(std::move(pred)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, value_type& update,
+                  const bool final_pass) const {
+    const auto& myval = m_first_from[i];
+    if (final_pass) {
+      if (m_pred(myval)) {
+        m_first_dest_true[update.true_count_] = myval;
+      } else {
+        m_first_dest_false[update.false_count_] = myval;
+      }
+    }
+
+    if (m_pred(myval)) {
+      update.true_count_ += 1;
+    } else {
+      update.false_count_ += 1;
+    }
+  }
+
+  KOKKOS_FUNCTION
+  void init(value_type& update) const {
+    update.true_count_  = 0;
+    update.false_count_ = 0;
+  }
+
+  KOKKOS_FUNCTION
+  void join(value_type& update, const value_type& input) const {
+    update.true_count_ += input.true_count_;
+    update.false_count_ += input.false_count_;
+  }
+};
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorTrueType, class OutputIteratorFalseType,
+          class PredicateType>
+::Kokkos::pair<OutputIteratorTrueType, OutputIteratorFalseType>
+partition_copy_impl(const std::string& label, const ExecutionSpace& ex,
+                    InputIteratorType from_first, InputIteratorType from_last,
+                    OutputIteratorTrueType to_first_true,
+                    OutputIteratorFalseType to_first_false,
+                    PredicateType pred) {
+  // the implementation uses a scan, similar to how copy_if is implemented
+
+  // checks
+  Impl::static_assert_random_access_and_accessible(
+      ex, from_first, to_first_true, to_first_false);
+  Impl::static_assert_iterators_have_matching_difference_type(
+      from_first, to_first_true, to_first_false);
+  Impl::expect_valid_range(from_first, from_last);
+
+  if (from_first == from_last) {
+    return {to_first_true, to_first_false};
+  }
+
+  // aliases
+  using index_type = typename InputIteratorType::difference_type;
+  using func_type =
+      StdPartitionCopyFunctor<index_type, InputIteratorType,
+                              OutputIteratorTrueType, OutputIteratorFalseType,
+                              PredicateType>;
+
+  // run
+  const auto num_elements =
+      Kokkos::Experimental::distance(from_first, from_last);
+  typename func_type::value_type counts{0, 0};
+  ::Kokkos::parallel_scan(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+      func_type(from_first, to_first_true, to_first_false, pred), counts);
+
+  // fence not needed here because of the scan into counts
+
+  return {to_first_true + counts.true_count_,
+          to_first_false + counts.false_count_};
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
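
A minimal caller-side sketch of the scan-based partition_copy above, assuming the public Kokkos::Experimental::partition_copy overload declared in std_algorithms/Kokkos_PartitionCopy.hpp; each destination is assumed large enough to hold all of src in the worst case:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    struct IsPositive {
      KOKKOS_FUNCTION bool operator()(int x) const { return x > 0; }
    };

    void partition_copy_demo(const Kokkos::View<int*>& src,
                             const Kokkos::View<int*>& dst_true,
                             const Kokkos::View<int*>& dst_false) {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      auto ends = KE::partition_copy(exec, KE::cbegin(src), KE::cend(src),
                                     KE::begin(dst_true), KE::begin(dst_false),
                                     IsPositive{});
      // ends.first / ends.second point one past the last element written to
      // the true / false destinations, mirroring counts.true_count_ and
      // counts.false_count_ above
      (void)ends;
    }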
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_PartitionPoint.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_PartitionPoint.hpp
new file mode 100644 (file)
index 0000000..671e8d7
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_PARTITION_POINT_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_PARTITION_POINT_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class ReducerType, class PredicateType>
+struct StdPartitionPointFunctor {
+  using red_value_type = typename ReducerType::value_type;
+  using index_type     = typename IteratorType::difference_type;
+
+  IteratorType m_first;
+  ReducerType m_reducer;
+  PredicateType m_p;
+
+  KOKKOS_FUNCTION
+  void operator()(const index_type i, red_value_type& redValue) const {
+    const auto predicate_value = m_p(m_first[i]);
+    auto rv =
+        predicate_value
+            ? red_value_type{::Kokkos::reduction_identity<index_type>::min()}
+            : red_value_type{i};
+    m_reducer.join(redValue, rv);
+  }
+
+  KOKKOS_FUNCTION
+  StdPartitionPointFunctor(IteratorType first, ReducerType reducer,
+                           PredicateType p)
+      : m_first(std::move(first)),
+        m_reducer(std::move(reducer)),
+        m_p(std::move(p)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class PredicateType>
+IteratorType partition_point_impl(const std::string& label,
+                                  const ExecutionSpace& ex, IteratorType first,
+                                  IteratorType last, PredicateType pred) {
+  // locates the end of the first partition, that is, the first
+  // element that does not satisfy p, or last if all elements satisfy p.
+  // The implementation below finds the first location where p is false.
+
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  if (first == last) {
+    return first;
+  }
+
+  // aliases
+  using index_type           = typename IteratorType::difference_type;
+  using reducer_type         = StdPartitionPoint<index_type>;
+  using reduction_value_type = typename reducer_type::value_type;
+  using func_t =
+      StdPartitionPointFunctor<IteratorType, reducer_type, PredicateType>;
+
+  // run
+  reduction_value_type red_result;
+  reducer_type reducer(red_result);
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_reduce(label,
+                            RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                            func_t(first, reducer, pred), reducer);
+
+  // fence not needed because reducing into scalar
+
+  // decide and return
+  if (red_result.min_loc_false ==
+      ::Kokkos::reduction_identity<index_type>::min()) {
+    // if all elements are true, return last
+    return last;
+  } else {
+    return first + red_result.min_loc_false;
+  }
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
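
A minimal sketch of the reduction above from the caller's side, assuming the public Kokkos::Experimental::partition_point overload declared in std_algorithms/Kokkos_PartitionPoint.hpp and a view already partitioned by the predicate (names are illustrative):

    #include <cstddef>
    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    struct IsNegative {
      KOKKOS_FUNCTION bool operator()(double x) const { return x < 0.0; }
    };

    // index of the first non-negative element of a partitioned view
    std::ptrdiff_t partition_index(const Kokkos::View<double*>& v) {
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;
      auto it = KE::partition_point(exec, KE::begin(v), KE::end(v), IsNegative{});
      return it - KE::begin(v);
    }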
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_RandomAccessIterator.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_RandomAccessIterator.hpp
new file mode 100644 (file)
index 0000000..2457d94
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_RANDOM_ACCESS_ITERATOR_IMPL_HPP
+#define KOKKOS_RANDOM_ACCESS_ITERATOR_IMPL_HPP
+
+#include <iterator>
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_View.hpp>
+#include "Kokkos_Constraints.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class T>
+class RandomAccessIterator;
+
+template <class DataType, class... Args>
+class RandomAccessIterator< ::Kokkos::View<DataType, Args...> > {
+ public:
+  using view_type     = ::Kokkos::View<DataType, Args...>;
+  using iterator_type = RandomAccessIterator<view_type>;
+
+  using iterator_category = std::random_access_iterator_tag;
+  using value_type        = typename view_type::value_type;
+  using difference_type   = ptrdiff_t;
+  using pointer           = typename view_type::pointer_type;
+  using reference         = typename view_type::reference_type;
+
+  static_assert(view_type::rank == 1 &&
+                    (std::is_same<typename view_type::traits::array_layout,
+                                  Kokkos::LayoutLeft>::value ||
+                     std::is_same<typename view_type::traits::array_layout,
+                                  Kokkos::LayoutRight>::value ||
+                     std::is_same<typename view_type::traits::array_layout,
+                                  Kokkos::LayoutStride>::value),
+                "RandomAccessIterator only supports 1D Views with LayoutLeft, "
+                "LayoutRight, LayoutStride.");
+
+  KOKKOS_DEFAULTED_FUNCTION RandomAccessIterator() = default;
+
+  explicit KOKKOS_FUNCTION RandomAccessIterator(const view_type view)
+      : m_view(view) {}
+  explicit KOKKOS_FUNCTION RandomAccessIterator(const view_type view,
+                                                ptrdiff_t current_index)
+      : m_view(view), m_current_index(current_index) {}
+
+  KOKKOS_FUNCTION
+  iterator_type& operator++() {
+    ++m_current_index;
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  iterator_type operator++(int) {
+    auto tmp = *this;
+    ++*this;
+    return tmp;
+  }
+
+  KOKKOS_FUNCTION
+  iterator_type& operator--() {
+    --m_current_index;
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  iterator_type operator--(int) {
+    auto tmp = *this;
+    --*this;
+    return tmp;
+  }
+
+  KOKKOS_FUNCTION
+  reference operator[](difference_type n) const {
+    return m_view(m_current_index + n);
+  }
+
+  KOKKOS_FUNCTION
+  iterator_type& operator+=(difference_type n) {
+    m_current_index += n;
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  iterator_type& operator-=(difference_type n) {
+    m_current_index -= n;
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  iterator_type operator+(difference_type n) const {
+    return iterator_type(m_view, m_current_index + n);
+  }
+
+  KOKKOS_FUNCTION
+  iterator_type operator-(difference_type n) const {
+    return iterator_type(m_view, m_current_index - n);
+  }
+
+  KOKKOS_FUNCTION
+  difference_type operator-(iterator_type it) const {
+    return m_current_index - it.m_current_index;
+  }
+
+  KOKKOS_FUNCTION
+  bool operator==(iterator_type other) const {
+    return m_current_index == other.m_current_index &&
+           m_view.data() == other.m_view.data();
+  }
+
+  KOKKOS_FUNCTION
+  bool operator!=(iterator_type other) const {
+    return m_current_index != other.m_current_index ||
+           m_view.data() != other.m_view.data();
+  }
+
+  KOKKOS_FUNCTION
+  bool operator<(iterator_type other) const {
+    return m_current_index < other.m_current_index;
+  }
+
+  KOKKOS_FUNCTION
+  bool operator<=(iterator_type other) const {
+    return m_current_index <= other.m_current_index;
+  }
+
+  KOKKOS_FUNCTION
+  bool operator>(iterator_type other) const {
+    return m_current_index > other.m_current_index;
+  }
+
+  KOKKOS_FUNCTION
+  bool operator>=(iterator_type other) const {
+    return m_current_index >= other.m_current_index;
+  }
+
+  KOKKOS_FUNCTION
+  reference operator*() const { return m_view(m_current_index); }
+
+ private:
+  view_type m_view;
+  ptrdiff_t m_current_index = 0;
+};
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
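
In practice these iterators are not constructed directly; they are obtained through the begin/end helpers declared in std_algorithms/Kokkos_BeginEnd.hpp. A minimal sketch (names are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void iterator_demo() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 10);
      auto first = KE::begin(v);  // RandomAccessIterator over the 1D view
      auto last  = KE::end(v);
      // pointer-like arithmetic; inside a kernel, *(first + i) refers to v(i)
      const auto n = last - first;  // == v.extent(0)
      (void)n;
    }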
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Reduce.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Reduce.hpp
new file mode 100644 (file)
index 0000000..26e0795
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REDUCE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REDUCE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include "Kokkos_ReducerWithArbitraryJoinerNoNeutralElement.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IteratorType, class ValueType>
+struct StdReduceDefaultFunctor {
+  using index_type = typename IteratorType::difference_type;
+
+  const IteratorType m_first;
+
+  KOKKOS_FUNCTION
+  void operator()(const index_type i, ValueType& update) const {
+    update += m_first[i];
+  }
+};
+
+template <class ValueType>
+struct StdReduceDefaultJoinFunctor {
+  KOKKOS_FUNCTION
+  constexpr ValueType operator()(const ValueType& a, const ValueType& b) const {
+    return a + b;
+  }
+};
+
+template <class IteratorType, class ReducerType>
+struct StdReduceFunctor {
+  using red_value_type = typename ReducerType::value_type;
+  using index_type     = typename IteratorType::difference_type;
+
+  const IteratorType m_first;
+  const ReducerType m_reducer;
+
+  KOKKOS_FUNCTION
+  void operator()(const index_type i, red_value_type& red_value) const {
+    auto tmp_wrapped_value = red_value_type{m_first[i], false};
+
+    if (red_value.is_initial) {
+      red_value = tmp_wrapped_value;
+    } else {
+      m_reducer.join(red_value, tmp_wrapped_value);
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdReduceFunctor(IteratorType first, ReducerType reducer)
+      : m_first(std::move(first)), m_reducer(std::move(reducer)) {}
+};
+
+//------------------------------
+// reduce_custom_functors_impl
+//------------------------------
+template <class ExecutionSpace, class IteratorType, class ValueType,
+          class JoinerType>
+ValueType reduce_custom_functors_impl(const std::string& label,
+                                      const ExecutionSpace& ex,
+                                      IteratorType first, IteratorType last,
+                                      ValueType init_reduction_value,
+                                      JoinerType joiner) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::expect_valid_range(first, last);
+
+  if (first == last) {
+    // init is returned, unmodified
+    return init_reduction_value;
+  }
+
+  // aliases
+  using reducer_type =
+      ReducerWithArbitraryJoinerNoNeutralElement<ValueType, JoinerType>;
+  using functor_type         = StdReduceFunctor<IteratorType, reducer_type>;
+  using reduction_value_type = typename reducer_type::value_type;
+
+  // run
+  reduction_value_type result;
+  reducer_type reducer(result, joiner);
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_reduce(label,
+                            RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                            functor_type(first, reducer), reducer);
+
+  // fence not needed since reducing into scalar
+  return joiner(result.val, init_reduction_value);
+}
+
+template <typename ValueType>
+using has_reduction_identity_sum_t =
+    decltype(Kokkos::reduction_identity<ValueType>::sum());
+
+template <class ExecutionSpace, class IteratorType, class ValueType>
+ValueType reduce_default_functors_impl(const std::string& label,
+                                       const ExecutionSpace& ex,
+                                       IteratorType first, IteratorType last,
+                                       ValueType init_reduction_value) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::expect_valid_range(first, last);
+
+  using value_type = Kokkos::Impl::remove_cvref_t<ValueType>;
+
+  if (::Kokkos::is_detected<has_reduction_identity_sum_t, value_type>::value) {
+    if (first == last) {
+      // init is returned, unmodified
+      return init_reduction_value;
+    }
+
+    using functor_type =
+        Impl::StdReduceDefaultFunctor<IteratorType, value_type>;
+
+    // run
+    value_type tmp;
+    const auto num_elements = Kokkos::Experimental::distance(first, last);
+    ::Kokkos::parallel_reduce(label,
+                              RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                              functor_type{first}, tmp);
+    // fence not needed since reducing into scalar
+    tmp += init_reduction_value;
+    return tmp;
+  } else {
+    using joiner_type = Impl::StdReduceDefaultJoinFunctor<value_type>;
+    return reduce_custom_functors_impl(
+        label, ex, first, last, std::move(init_reduction_value), joiner_type());
+  }
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
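
A usage sketch, assuming the public Kokkos::Experimental::reduce overloads that forward to the impl functions above: an integer sum takes the reduction_identity fast path in reduce_default_functors_impl, while supplying a custom joiner routes through reduce_custom_functors_impl:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>
    #include <cstdio>

    int main(int argc, char* argv[]) {
      Kokkos::ScopeGuard guard(argc, argv);
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;

      Kokkos::View<int*> v("v", 5);
      Kokkos::parallel_for(5, KOKKOS_LAMBDA(int i) { v(i) = i + 1; });  // 1..5

      // int has reduction_identity<int>::sum(), so this takes the default path
      const int sum = KE::reduce(exec, KE::begin(v), KE::end(v), 0);

      // a custom joiner has no known neutral element: custom-functor path
      const int prod = KE::reduce(exec, KE::begin(v), KE::end(v), 1,
                                  KOKKOS_LAMBDA(int a, int b) { return a * b; });

      printf("sum = %d, prod = %d\n", sum, prod);  // sum = 15, prod = 120
      return 0;
    }
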
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReducerWithArbitraryJoinerNoNeutralElement.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReducerWithArbitraryJoinerNoNeutralElement.hpp
new file mode 100644 (file)
index 0000000..d8e383b
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REDUCER_WITH_ARBITRARY_JOINER_NONEUTRAL_ELEMENT_HPP
+#define KOKKOS_STD_ALGORITHMS_REDUCER_WITH_ARBITRARY_JOINER_NONEUTRAL_ELEMENT_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_ValueWrapperForNoNeutralElement.hpp"
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+// This reducer lives here, inside Impl, rather than with all the other
+// reducers, because it would not work for the OpenMPTarget backend.
+// It can be moved later.
+
+template <class Scalar, class JoinerType, class Space = HostSpace>
+struct ReducerWithArbitraryJoinerNoNeutralElement {
+  using scalar_type = std::remove_cv_t<Scalar>;
+
+ public:
+  // Required
+  using reducer =
+      ReducerWithArbitraryJoinerNoNeutralElement<Scalar, JoinerType, Space>;
+  using value_type = ValueWrapperForNoNeutralElement<scalar_type>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  JoinerType m_joiner;
+  result_view_type m_value;
+  bool m_references_scalar_v;
+
+ public:
+  KOKKOS_FUNCTION
+  ReducerWithArbitraryJoinerNoNeutralElement(value_type& value_,
+                                             JoinerType joiner_)
+      : m_joiner(joiner_), m_value(&value_), m_references_scalar_v(true) {}
+
+  KOKKOS_FUNCTION
+  ReducerWithArbitraryJoinerNoNeutralElement(const result_view_type& value_,
+                                             JoinerType joiner_)
+      : m_joiner(joiner_), m_value(value_), m_references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    dest.val = m_joiner(dest.val, src.val);
+  }
+
+  KOKKOS_FUNCTION
+  void init(value_type& val) const {
+    // reduction_identity cannot be called here, so default-initialize instead
+    val = {};
+  }
+
+  KOKKOS_FUNCTION
+  value_type& reference() const { return *m_value.data(); }
+
+  KOKKOS_FUNCTION
+  result_view_type view() const { return m_value; }
+
+  KOKKOS_FUNCTION
+  bool references_scalar() const { return m_references_scalar_v; }
+};
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
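
To make the join protocol concrete: a plain C++ mock of how the is_initial flag set by StdReduceFunctor (Kokkos_Reduce.hpp above) interacts with this reducer's join(). Wrapped and join_step are hypothetical names for this sketch only, not Kokkos API:

    #include <cassert>
    #include <initializer_list>

    // Hypothetical stand-ins mirroring ValueWrapperForNoNeutralElement and
    // the reducer's join(); only the control flow is reproduced.
    struct Wrapped {
      int val         = 0;
      bool is_initial = true;  // no contribution folded in yet
    };

    template <class Joiner>
    void join_step(Wrapped& dest, int incoming, Joiner joiner) {
      if (dest.is_initial) {
        dest = {incoming, false};               // first element seeds the result
      } else {
        dest.val = joiner(dest.val, incoming);  // later ones use the joiner
      }
    }

    int main() {
      Wrapped acc;
      auto min_join = [](int a, int b) { return a < b ? a : b; };
      for (int x : {7, 3, 9}) join_step(acc, x, min_join);
      assert(acc.val == 3 && !acc.is_initial);  // min found without a neutral element
      return 0;
    }
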
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_RemoveAllVariants.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_RemoveAllVariants.hpp
new file mode 100644 (file)
index 0000000..742d4d7
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REMOVE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REMOVE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <std_algorithms/Kokkos_CountIf.hpp>
+#include <std_algorithms/Kokkos_CopyIf.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class FirstFrom, class FirstDest, class PredType>
+struct StdRemoveIfStage1Functor {
+  FirstFrom m_first_from;
+  FirstDest m_first_dest;
+  PredType m_must_remove;
+
+  KOKKOS_FUNCTION
+  StdRemoveIfStage1Functor(FirstFrom first_from, FirstDest first_dest,
+                           PredType pred)
+      : m_first_from(std::move(first_from)),
+        m_first_dest(std::move(first_dest)),
+        m_must_remove(std::move(pred)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, IndexType& update,
+                  const bool final_pass) const {
+    auto& myval = m_first_from[i];
+    if (final_pass) {
+      if (!m_must_remove(myval)) {
+        // moving here is ok because we are inside the final pass;
+        // this is the move assignment specified by the standard
+        m_first_dest[update] = std::move(myval);
+      }
+    }
+
+    if (!m_must_remove(myval)) {
+      update += 1;
+    }
+  }
+};
+
+template <class IndexType, class InputIteratorType, class OutputIteratorType>
+struct StdRemoveIfStage2Functor {
+  InputIteratorType m_first_from;
+  OutputIteratorType m_first_to;
+
+  KOKKOS_FUNCTION
+  StdRemoveIfStage2Functor(InputIteratorType first_from,
+                           OutputIteratorType first_to)
+      : m_first_from(std::move(first_from)), m_first_to(std::move(first_to)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i) const {
+    m_first_to[i] = std::move(m_first_from[i]);
+  }
+};
+
+template <class ExecutionSpace, class IteratorType, class UnaryPredicateType>
+IteratorType remove_if_impl(const std::string& label, const ExecutionSpace& ex,
+                            IteratorType first, IteratorType last,
+                            UnaryPredicateType pred) {
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  if (first == last) {
+    return last;
+  } else {
+    // create a tmp buffer into which we *move* all elements that we keep.
+    // note that the tmp buffer is just large enough to store
+    // the elements to keep, because ideally we do not need/want one
+    // as large as the original range.
+    // To allocate the right tmp view, we need a call to count_if.
+    // We could just do a "safe" allocation of a buffer as
+    // large as (last-first), but a call to count_if is more affordable.
+
+    // count how many elements we need to keep
+    // note that the elements to remove are those that meet the predicate
+    const auto remove_count =
+        ::Kokkos::Experimental::count_if(ex, first, last, pred);
+    const auto keep_count =
+        Kokkos::Experimental::distance(first, last) - remove_count;
+
+    // create helper tmp view
+    using value_type    = typename IteratorType::value_type;
+    using tmp_view_type = Kokkos::View<value_type*, ExecutionSpace>;
+    tmp_view_type tmp_view("std_remove_if_tmp_view", keep_count);
+    using tmp_readwrite_iterator_type = decltype(begin(tmp_view));
+
+    // in stage 1, *move* all elements to keep from original range to tmp
+    // we use similar impl as copy_if except that we *move* rather than copy
+    using index_type = typename IteratorType::difference_type;
+    using func1_type = StdRemoveIfStage1Functor<index_type, IteratorType,
+                                                tmp_readwrite_iterator_type,
+                                                UnaryPredicateType>;
+
+    const auto scan_num_elements = Kokkos::Experimental::distance(first, last);
+    index_type scan_count        = 0;
+    ::Kokkos::parallel_scan(
+        label, RangePolicy<ExecutionSpace>(ex, 0, scan_num_elements),
+        func1_type(first, begin(tmp_view), pred), scan_count);
+
+    // scan_count should be equal to keep_count
+    assert(scan_count == keep_count);
+    (void)scan_count;  // avoid unused-variable warnings when the assert is compiled out
+
+    // stage 2, we do parfor to move from tmp to original range
+    using func2_type =
+        StdRemoveIfStage2Functor<index_type, tmp_readwrite_iterator_type,
+                                 IteratorType>;
+    ::Kokkos::parallel_for(
+        "remove_if_stage2_parfor",
+        RangePolicy<ExecutionSpace>(ex, 0, tmp_view.extent(0)),
+        func2_type(begin(tmp_view), first));
+    ex.fence("Kokkos::remove_if: fence after stage2");
+
+    // return
+    return first + keep_count;
+  }
+}
+
+template <class ExecutionSpace, class IteratorType, class ValueType>
+auto remove_impl(const std::string& label, const ExecutionSpace& ex,
+                 IteratorType first, IteratorType last,
+                 const ValueType& value) {
+  using predicate_type = StdAlgoEqualsValUnaryPredicate<ValueType>;
+  return remove_if_impl(label, ex, first, last, predicate_type(value));
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class ValueType>
+auto remove_copy_impl(const std::string& label, const ExecutionSpace& ex,
+                      InputIteratorType first_from, InputIteratorType last_from,
+                      OutputIteratorType first_dest, const ValueType& value) {
+  // this is like copy_if except that we need to *ignore* the elements
+  // that match the value, so we can solve this as follows:
+
+  using predicate_type = StdAlgoNotEqualsValUnaryPredicate<ValueType>;
+  return ::Kokkos::Experimental::copy_if(label, ex, first_from, last_from,
+                                         first_dest, predicate_type(value));
+}
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class UnaryPredicate>
+auto remove_copy_if_impl(const std::string& label, const ExecutionSpace& ex,
+                         InputIteratorType first_from,
+                         InputIteratorType last_from,
+                         OutputIteratorType first_dest,
+                         const UnaryPredicate& pred) {
+  // this is like copy_if except that we need to *ignore* the elements
+  // satisfying the pred, so we can solve this as follows:
+
+  using value_type = typename InputIteratorType::value_type;
+  using pred_wrapper_type =
+      StdAlgoNegateUnaryPredicateWrapper<value_type, UnaryPredicate>;
+  return ::Kokkos::Experimental::copy_if(label, ex, first_from, last_from,
+                                         first_dest, pred_wrapper_type(pred));
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
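
A usage sketch for these variants, assuming the public Kokkos::Experimental::remove_if wrapper that calls remove_if_impl; as with std::remove_if, the view keeps its size and the returned iterator marks the new logical end:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    int main(int argc, char* argv[]) {
      Kokkos::ScopeGuard guard(argc, argv);
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;

      Kokkos::View<int*> v("v", 6);
      Kokkos::parallel_for(6, KOKKOS_LAMBDA(int i) { v(i) = i % 2; });  // 0 1 0 1 0 1

      // drop the odd entries; the kept zeros are packed at the front
      auto new_end = KE::remove_if(exec, KE::begin(v), KE::end(v),
                                   KOKKOS_LAMBDA(int x) { return x == 1; });
      const auto kept = new_end - KE::begin(v);  // 3 elements kept
      (void)kept;
      return 0;
    }
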
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Replace.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Replace.hpp
new file mode 100644 (file)
index 0000000..877ffa2
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator, class ValueType>
+struct StdReplaceFunctor {
+  using index_type = typename InputIterator::difference_type;
+  InputIterator m_first;
+  ValueType m_old_value;
+  ValueType m_new_value;
+
+  KOKKOS_FUNCTION
+  void operator()(index_type i) const {
+    if (m_first[i] == m_old_value) {
+      m_first[i] = m_new_value;
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdReplaceFunctor(InputIterator first, ValueType old_value,
+                    ValueType new_value)
+      : m_first(std::move(first)),
+        m_old_value(std::move(old_value)),
+        m_new_value(std::move(new_value)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class ValueType>
+void replace_impl(const std::string& label, const ExecutionSpace& ex,
+                  IteratorType first, IteratorType last,
+                  const ValueType& old_value, const ValueType& new_value) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  // aliases
+  using func_t = StdReplaceFunctor<IteratorType, ValueType>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         func_t(first, old_value, new_value));
+  ex.fence("Kokkos::replace: fence after operation");
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
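
A minimal sketch of the corresponding public call, assuming the Kokkos::Experimental::replace wrapper that forwards here; the function name zeros_to_42 is hypothetical:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Replaces every 0 in v with 42, running on the given execution space.
    void zeros_to_42(const Kokkos::DefaultExecutionSpace& exec,
                     Kokkos::View<int*> v) {
      namespace KE = Kokkos::Experimental;
      KE::replace(exec, KE::begin(v), KE::end(v), 0, 42);
    }
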
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReplaceCopy.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReplaceCopy.hpp
new file mode 100644 (file)
index 0000000..b75dde9
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator, class OutputIterator, class ValueType>
+struct StdReplaceCopyFunctor {
+  using index_type = typename InputIterator::difference_type;
+
+  InputIterator m_first_from;
+  OutputIterator m_first_dest;
+  ValueType m_old_value;
+  ValueType m_new_value;
+
+  KOKKOS_FUNCTION
+  void operator()(index_type i) const {
+    const auto& myvalue_from = m_first_from[i];
+
+    if (myvalue_from == m_old_value) {
+      m_first_dest[i] = m_new_value;
+    } else {
+      m_first_dest[i] = myvalue_from;
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdReplaceCopyFunctor(InputIterator first_from, OutputIterator first_dest,
+                        ValueType old_value, ValueType new_value)
+      : m_first_from(std::move(first_from)),
+        m_first_dest(std::move(first_dest)),
+        m_old_value(std::move(old_value)),
+        m_new_value(std::move(new_value)) {}
+};
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class ValueType>
+OutputIteratorType replace_copy_impl(const std::string& label,
+                                     const ExecutionSpace& ex,
+                                     InputIteratorType first_from,
+                                     InputIteratorType last_from,
+                                     OutputIteratorType first_dest,
+                                     const ValueType& old_value,
+                                     const ValueType& new_value) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+  Impl::static_assert_iterators_have_matching_difference_type(first_from,
+                                                              first_dest);
+  Impl::expect_valid_range(first_from, last_from);
+
+  // aliases
+  using func_t =
+      StdReplaceCopyFunctor<InputIteratorType, OutputIteratorType, ValueType>;
+
+  // run
+  const auto num_elements =
+      Kokkos::Experimental::distance(first_from, last_from);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         func_t(first_from, first_dest, old_value, new_value));
+  ex.fence("Kokkos::replace_copy: fence after operation");
+
+  // return
+  return first_dest + num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
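
An analogous sketch for the copying variant, assuming the public Kokkos::Experimental::replace_copy wrapper; copy_with_sevens is a hypothetical name, and the return value is begin(dst) advanced by the number of elements written:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Copies src into dst, rewriting every 0 as 7.
    auto copy_with_sevens(const Kokkos::DefaultExecutionSpace& exec,
                          Kokkos::View<const int*> src,
                          Kokkos::View<int*> dst) {
      namespace KE = Kokkos::Experimental;
      return KE::replace_copy(exec, KE::begin(src), KE::end(src),
                              KE::begin(dst), 0, 7);
    }
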
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReplaceCopyIf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReplaceCopyIf.hpp
new file mode 100644 (file)
index 0000000..8f7c814
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IF_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_COPY_IF_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class InputIterator, class OutputIterator,
+          class PredicateType, class ValueType>
+struct StdReplaceIfCopyFunctor {
+  InputIterator m_first_from;
+  OutputIterator m_first_dest;
+  PredicateType m_pred;
+  ValueType m_new_value;
+
+  KOKKOS_FUNCTION
+  void operator()(IndexType i) const {
+    const auto& myvalue_from = m_first_from[i];
+
+    if (m_pred(myvalue_from)) {
+      m_first_dest[i] = m_new_value;
+    } else {
+      m_first_dest[i] = myvalue_from;
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdReplaceIfCopyFunctor(InputIterator first_from, OutputIterator first_dest,
+                          PredicateType pred, ValueType new_value)
+      : m_first_from(std::move(first_from)),
+        m_first_dest(std::move(first_dest)),
+        m_pred(std::move(pred)),
+        m_new_value(std::move(new_value)) {}
+};
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class PredicateType, class ValueType>
+OutputIteratorType replace_copy_if_impl(const std::string& label,
+                                        const ExecutionSpace& ex,
+                                        InputIteratorType first_from,
+                                        InputIteratorType last_from,
+                                        OutputIteratorType first_dest,
+                                        PredicateType pred,
+                                        const ValueType& new_value) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+  Impl::static_assert_iterators_have_matching_difference_type(first_from,
+                                                              first_dest);
+  Impl::expect_valid_range(first_from, last_from);
+
+  // aliases
+  using index_type = typename InputIteratorType::difference_type;
+  using func_t =
+      StdReplaceIfCopyFunctor<index_type, InputIteratorType, OutputIteratorType,
+                              PredicateType, ValueType>;
+
+  // run
+  const auto num_elements =
+      Kokkos::Experimental::distance(first_from, last_from);
+  ::Kokkos::parallel_for(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+      func_t(first_from, first_dest, std::move(pred), new_value));
+  ex.fence("Kokkos::replace_copy_if: fence after operation");
+
+  // return
+  return first_dest + num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
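
A sketch with a device-callable predicate, assuming the public Kokkos::Experimental::replace_copy_if wrapper that forwards here; copy_clamped is a hypothetical name:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Copies src into dst, rewriting every negative entry as 0.
    void copy_clamped(const Kokkos::DefaultExecutionSpace& exec,
                      Kokkos::View<const double*> src,
                      Kokkos::View<double*> dst) {
      namespace KE = Kokkos::Experimental;
      KE::replace_copy_if(exec, KE::begin(src), KE::end(src), KE::begin(dst),
                          KOKKOS_LAMBDA(double x) { return x < 0.0; }, 0.0);
    }
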
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReplaceIf.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReplaceIf.hpp
new file mode 100644 (file)
index 0000000..6fe3301
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REPLACE_IF_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REPLACE_IF_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator, class PredicateType, class NewValueType>
+struct StdReplaceIfFunctor {
+  using index_type = typename InputIterator::difference_type;
+
+  InputIterator m_first;
+  PredicateType m_predicate;
+  NewValueType m_new_value;
+
+  KOKKOS_FUNCTION
+  void operator()(index_type i) const {
+    if (m_predicate(m_first[i])) {
+      m_first[i] = m_new_value;
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdReplaceIfFunctor(InputIterator first, PredicateType pred,
+                      NewValueType new_value)
+      : m_first(std::move(first)),
+        m_predicate(std::move(pred)),
+        m_new_value(std::move(new_value)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class PredicateType,
+          class ValueType>
+void replace_if_impl(const std::string& label, const ExecutionSpace& ex,
+                     IteratorType first, IteratorType last, PredicateType pred,
+                     const ValueType& new_value) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  // aliases
+  using func_t = StdReplaceIfFunctor<IteratorType, PredicateType, ValueType>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         func_t(first, std::move(pred), new_value));
+  ex.fence("Kokkos::replace_if: fence after operation");
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
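
The in-place counterpart, sketched under the same assumption that the public Kokkos::Experimental::replace_if wrapper forwards here; cap_values is a hypothetical name:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Every entry above threshold becomes cap.
    void cap_values(const Kokkos::DefaultExecutionSpace& exec,
                    Kokkos::View<double*> v, double threshold, double cap) {
      namespace KE = Kokkos::Experimental;
      KE::replace_if(exec, KE::begin(v), KE::end(v),
                     KOKKOS_LAMBDA(double x) { return x > threshold; }, cap);
    }
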
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Reverse.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Reverse.hpp
new file mode 100644 (file)
index 0000000..f84eb2c
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REVERSE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REVERSE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <std_algorithms/Kokkos_Swap.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class InputIterator>
+struct StdReverseFunctor {
+  using index_type = typename InputIterator::difference_type;
+  static_assert(std::is_signed<index_type>::value,
+                "Kokkos: StdReverseFunctor requires signed index type");
+
+  InputIterator m_first;
+  InputIterator m_last;
+
+  KOKKOS_FUNCTION
+  void operator()(index_type i) const {
+    // the swap below does the same thing, but it does not
+    // compile with Intel 18.0.5; inlining the implementation
+    // directly here works.
+#ifdef KOKKOS_COMPILER_INTEL
+    typename InputIterator::value_type tmp = std::move(m_first[i]);
+    m_first[i]                             = std::move(m_last[-i - 1]);
+    m_last[-i - 1]                         = std::move(tmp);
+#else
+    ::Kokkos::Experimental::swap(m_first[i], m_last[-i - 1]);
+#endif
+  }
+
+  StdReverseFunctor(InputIterator first, InputIterator last)
+      : m_first(std::move(first)), m_last(std::move(last)) {}
+};
+
+template <class ExecutionSpace, class InputIterator>
+void reverse_impl(const std::string& label, const ExecutionSpace& ex,
+                  InputIterator first, InputIterator last) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  // aliases
+  using func_t = StdReverseFunctor<InputIterator>;
+
+  // run
+  if (last >= first + 2) {
+    // only need half
+    const auto num_elements = Kokkos::Experimental::distance(first, last) / 2;
+    ::Kokkos::parallel_for(label,
+                           RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                           func_t(first, last));
+    ex.fence("Kokkos::reverse: fence after operation");
+  }
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
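
A usage sketch, assuming the public Kokkos::Experimental::reverse wrapper that forwards to reverse_impl; note the impl above only launches distance/2 swaps:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    int main(int argc, char* argv[]) {
      Kokkos::ScopeGuard guard(argc, argv);
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;

      Kokkos::View<int*> v("v", 5);
      Kokkos::parallel_for(5, KOKKOS_LAMBDA(int i) { v(i) = i; });  // 0 1 2 3 4

      KE::reverse(exec, KE::begin(v), KE::end(v));  // v is now 4 3 2 1 0
      return 0;
    }
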
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReverseCopy.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ReverseCopy.hpp
new file mode 100644 (file)
index 0000000..88b6ed1
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_REVERSE_COPY_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_REVERSE_COPY_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class InputIterator, class OutputIterator>
+struct StdReverseCopyFunctor {
+  static_assert(std::is_signed<IndexType>::value,
+                "Kokkos: StdReverseCopyFunctor requires signed index type");
+
+  InputIterator m_last;
+  OutputIterator m_dest_first;
+
+  KOKKOS_FUNCTION
+  void operator()(IndexType i) const { m_dest_first[i] = m_last[-1 - i]; }
+
+  StdReverseCopyFunctor(InputIterator _last, OutputIterator _dest_first)
+      : m_last(std::move(_last)), m_dest_first(std::move(_dest_first)) {}
+};
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator reverse_copy_impl(const std::string& label,
+                                 const ExecutionSpace& ex, InputIterator first,
+                                 InputIterator last, OutputIterator d_first) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first, d_first);
+  Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+  Impl::expect_valid_range(first, last);
+
+  // aliases
+  using index_type = typename InputIterator::difference_type;
+  using func_t =
+      StdReverseCopyFunctor<index_type, InputIterator, OutputIterator>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         func_t(last, d_first));
+  ex.fence("Kokkos::reverse_copy: fence after operation");
+
+  // return
+  return d_first + num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
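
The copying variant, sketched assuming the public Kokkos::Experimental::reverse_copy wrapper; reversed_into is a hypothetical name, and the return value points one past the last element written:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    // Writes src in reverse order into dst.
    auto reversed_into(const Kokkos::DefaultExecutionSpace& exec,
                       Kokkos::View<const int*> src, Kokkos::View<int*> dst) {
      namespace KE = Kokkos::Experimental;
      return KE::reverse_copy(exec, KE::begin(src), KE::end(src),
                              KE::begin(dst));
    }
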
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Rotate.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Rotate.hpp
new file mode 100644 (file)
index 0000000..c08cf1a
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_ROTATE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_ROTATE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Move.hpp>
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType rotate_with_pivot_in_left_half(const std::string& label,
+                                            const ExecutionSpace& ex,
+                                            IteratorType first,
+                                            IteratorType n_first,
+                                            IteratorType last) {
+  /*
+    This impl is specific for when the n_first iterator points to
+    an element that is before or equal to the middle of the range.
+
+    If we have:
+
+    | 0 | 1 | 2 | 1 | 4 | 5 | 2 | 2 | 10 | -3 | 1 | -6 | -5 | 8 | 9 | 11 |
+      ^           ^              mid                                      ^
+    first       n_first                                                 last
+
+    In step 1, we create a temporary view with extent = distance(n_first, last)
+    and *move* the elements from [n_first, last) to tmp view, such that
+    tmp view becomes:
+
+    | 1 | 4 | 5 | 2 | 2 | 10 | -3 | 1 | -6 | -5 | 8 | 9 | 11 |
+
+    In step 2, we move the elements in [first, n_first)
+    to the new position where they are supposed to end up.
+
+    In step 3, we move the elements from the tmp view to
+    the range starting at first.
+   */
+
+  namespace KE                     = ::Kokkos::Experimental;
+  const auto num_elements_on_left  = KE::distance(first, n_first);
+  const auto num_elements_on_right = KE::distance(n_first, last);
+
+  // create helper tmp view
+  using value_type    = typename IteratorType::value_type;
+  using tmp_view_type = Kokkos::View<value_type*, ExecutionSpace>;
+  tmp_view_type tmp_view("rotate_impl_for_pivot_in_left_half_impl",
+                         num_elements_on_right);
+  using tmp_readwrite_iterator_type = decltype(begin(tmp_view));
+
+  // index_type is the same and needed in all steps
+  using index_type = typename IteratorType::difference_type;
+
+  // step 1
+  using step1_func_type =
+      StdMoveFunctor<index_type, IteratorType, tmp_readwrite_iterator_type>;
+  ::Kokkos::parallel_for(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_on_right),
+      step1_func_type(n_first, begin(tmp_view)));
+
+  // step 2
+  using step2_func_type =
+      StdMoveFunctor<index_type, IteratorType, IteratorType>;
+  ::Kokkos::parallel_for(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_on_left),
+      step2_func_type(first, first + num_elements_on_right));
+
+  // step 3
+  using step3_func_type =
+      StdMoveFunctor<index_type, tmp_readwrite_iterator_type, IteratorType>;
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, tmp_view.extent(0)),
+                         step3_func_type(begin(tmp_view), first));
+
+  ex.fence("Kokkos::rotate: fence after operation");
+  return first + (last - n_first);
+}
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType rotate_with_pivot_in_right_half(const std::string& label,
+                                             const ExecutionSpace& ex,
+                                             IteratorType first,
+                                             IteratorType n_first,
+                                             IteratorType last) {
+  /*
+    This impl is specific for when the n_first iterator points to
+    an element that is after the middle of the range.
+
+    If we have:
+
+    | 0 | 1 | 2 | 1 | 4 | 5 | 2 | 2 | 10 | -3 | 1 | -6 | -5 | 8 | 9 | 11 |
+      ^                          mid            ^                          ^
+    first                                    n_first                     last
+
+    In step 1, we create a temporary view with extent = distance(first, n_first)
+    and *move* the elements from [first, n_first) to tmp view,
+    such that tmp view becomes:
+
+    | 0 | 1 | 2 | 1 | 4 | 5 | 2 | 2 | 10 | -3 | 1 |
+
+    In step 2, we move the elements in [n_first, last)
+    to the beginning where they are supposed to end up.
+
+    In step 3, we move the elements from the tmp view to
+    the range starting at first.
+   */
+
+  namespace KE                     = ::Kokkos::Experimental;
+  const auto num_elements_on_left  = KE::distance(first, n_first);
+  const auto num_elements_on_right = KE::distance(n_first, last);
+
+  // create helper tmp view
+  using value_type    = typename IteratorType::value_type;
+  using tmp_view_type = Kokkos::View<value_type*, ExecutionSpace>;
+  tmp_view_type tmp_view("rotate_impl_for_pivot_in_left_half_impl",
+                         num_elements_on_left);
+  using tmp_readwrite_iterator_type = decltype(begin(tmp_view));
+
+  // index_type is the same and needed in all steps
+  using index_type = typename IteratorType::difference_type;
+
+  // step 1
+  using step1_func_type =
+      StdMoveFunctor<index_type, IteratorType, tmp_readwrite_iterator_type>;
+  ::Kokkos::parallel_for(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_on_left),
+      step1_func_type(first, begin(tmp_view)));
+
+  // step 2
+  using step2_func_type =
+      StdMoveFunctor<index_type, IteratorType, IteratorType>;
+  ::Kokkos::parallel_for(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_on_right),
+      step2_func_type(n_first, first));
+
+  // step 3
+  using step3_func_type =
+      StdMoveFunctor<index_type, tmp_readwrite_iterator_type, IteratorType>;
+  ::Kokkos::parallel_for(
+      label, RangePolicy<ExecutionSpace>(ex, 0, tmp_view.extent(0)),
+      step3_func_type(begin(tmp_view), first + num_elements_on_right));
+
+  ex.fence("Kokkos::rotate: fence after operation");
+  return first + (last - n_first);
+}
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType rotate_impl(const std::string& label, const ExecutionSpace& ex,
+                         IteratorType first, IteratorType n_first,
+                         IteratorType last) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+  Impl::expect_valid_range(first, n_first);
+  Impl::expect_valid_range(n_first, last);
+
+  namespace KE                     = ::Kokkos::Experimental;
+  const auto num_elements          = KE::distance(first, last);
+  const auto n_distance_from_first = KE::distance(first, n_first);
+  if (n_distance_from_first <= num_elements / 2) {
+    return rotate_with_pivot_in_left_half(label, ex, first, n_first, last);
+  } else {
+    return rotate_with_pivot_in_right_half(label, ex, first, n_first, last);
+  }
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
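
A usage sketch, assuming the public Kokkos::Experimental::rotate wrapper that forwards to rotate_impl; with the pivot at index 2 of 6, this exercises rotate_with_pivot_in_left_half:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>
    #include <cassert>

    int main(int argc, char* argv[]) {
      Kokkos::ScopeGuard guard(argc, argv);
      namespace KE = Kokkos::Experimental;
      Kokkos::DefaultExecutionSpace exec;

      Kokkos::View<int*> v("v", 6);
      Kokkos::parallel_for(6, KOKKOS_LAMBDA(int i) { v(i) = i; });  // 0 1 2 3 4 5

      // pivot in the left half of the range
      auto it = KE::rotate(exec, KE::begin(v), KE::begin(v) + 2, KE::end(v));
      // v is now 2 3 4 5 0 1, and `it` points at the old first element
      assert(it == KE::begin(v) + 4);
      return 0;
    }
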
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_RotateCopy.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_RotateCopy.hpp
new file mode 100644 (file)
index 0000000..a8111a5
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_ROTATE_COPY_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_ROTATE_COPY_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class InputIterator, class OutputIterator>
+struct StdRotateCopyFunctor {
+  InputIterator m_first;
+  InputIterator m_last;
+  InputIterator m_first_n;
+  OutputIterator m_dest_first;
+
+  KOKKOS_FUNCTION
+  void operator()(IndexType i) const {
+    const IndexType shift = m_last - m_first_n;
+
+    if (i < shift) {
+      m_dest_first[i] = m_first_n[i];
+    } else {
+      m_dest_first[i] = m_first[i - shift];
+    }
+  }
+
+  StdRotateCopyFunctor(InputIterator first, InputIterator last,
+                       InputIterator first_n, OutputIterator dest_first)
+      : m_first(std::move(first)),
+        m_last(std::move(last)),
+        m_first_n(std::move(first_n)),
+        m_dest_first(std::move(dest_first)) {}
+};
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator rotate_copy_impl(const std::string& label,
+                                const ExecutionSpace& ex, InputIterator first,
+                                InputIterator n_first, InputIterator last,
+                                OutputIterator d_first) {
+  /*
+    algorithm is implemented as follows:
+
+    first         n_first              last
+    |                |                  |
+    o  o  o  o  o  o  o  o  o  o  o  o
+
+    dest+0 -> first_n
+    dest+1 -> first_n+1
+    dest+2 -> first_n+2
+    dest+3 -> first
+    dest+4 -> first+1
+    dest+5 -> first+2
+    dest+6 -> first+3
+    dest+7 -> first+4
+    dest+8 -> first+5
+    ...
+    let shift = last - first_n;
+
+    then we have:
+    if (i < shift){
+      *(dest_first + i) = *(first_n + i);
+    }
+    else{
+      *(dest_first + i) = *(first + i - shift);
+    }
+  */
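+
+  /*
+    Worked example (illustration only): with input |a|b|c|d|e|f| and
+    n_first pointing at d, shift = last - n_first = 3; indices 0..2
+    copy from [n_first, last) and indices 3..5 copy from
+    [first, n_first), so the output range becomes |d|e|f|a|b|c|.
+   */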
+
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first, d_first);
+  Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+  Impl::expect_valid_range(first, last);
+  Impl::expect_valid_range(first, n_first);
+  Impl::expect_valid_range(n_first, last);
+
+  if (first == last) {
+    return d_first;
+  }
+
+  // aliases
+  using index_type = typename InputIterator::difference_type;
+  using func_type =
+      StdRotateCopyFunctor<index_type, InputIterator, OutputIterator>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         func_type(first, last, n_first, d_first));
+
+  ex.fence("Kokkos::rotate_copy: fence after operation");
+
+  // return
+  return d_first + num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Search.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Search.hpp
new file mode 100644 (file)
index 0000000..2afb0a7
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_SEARCH_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_SEARCH_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Equal.hpp>
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class IteratorType1, class IteratorType2,
+          class ReducerType, class PredicateType>
+struct StdSearchFunctor {
+  using red_value_type = typename ReducerType::value_type;
+
+  IteratorType1 m_first;
+  IteratorType1 m_last;
+  IteratorType2 m_s_first;
+  IteratorType2 m_s_last;
+  ReducerType m_reducer;
+  PredicateType m_p;
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, red_value_type& red_value) const {
+    namespace KE = ::Kokkos::Experimental;
+    auto myit    = m_first + i;
+    bool found   = true;
+
+    const auto search_count = KE::distance(m_s_first, m_s_last);
+    for (IndexType k = 0; k < search_count; ++k) {
+      // note that we add this EXPECT to check that we are in a valid
+      // range; strictly speaking it could be removed, because the
+      // guarantee that we do not go out of bounds is already provided
+      // at the calling site, where we size the par_reduce range.
+      KOKKOS_EXPECTS((myit + k) < m_last);
+
+      if (!m_p(myit[k], m_s_first[k])) {
+        found = false;
+        break;
+      }
+    }
+
+    const auto rv =
+        found ? red_value_type{i}
+              : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
+
+    m_reducer.join(red_value, rv);
+  }
+
+  KOKKOS_FUNCTION
+  StdSearchFunctor(IteratorType1 first, IteratorType1 last,
+                   IteratorType2 s_first, IteratorType2 s_last,
+                   ReducerType reducer, PredicateType p)
+      : m_first(std::move(first)),
+        m_last(std::move(last)),
+        m_s_first(std::move(s_first)),
+        m_s_last(std::move(s_last)),
+        m_reducer(std::move(reducer)),
+        m_p(std::move(p)) {}
+};
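+
+// Note on the reduction: FirstLoc keeps the smallest index at which a
+// full match of [s_first, s_last) was found; indices without a match
+// contribute the reduction identity and are therefore ignored.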
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class BinaryPredicateType>
+IteratorType1 search_impl(const std::string& label, const ExecutionSpace& ex,
+                          IteratorType1 first, IteratorType1 last,
+                          IteratorType2 s_first, IteratorType2 s_last,
+                          const BinaryPredicateType& pred) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first, s_first);
+  Impl::static_assert_iterators_have_matching_difference_type(first, s_first);
+  Impl::expect_valid_range(first, last);
+  Impl::expect_valid_range(s_first, s_last);
+
+  // the target sequence should not be larger than the range [first, last)
+  namespace KE            = ::Kokkos::Experimental;
+  const auto num_elements = KE::distance(first, last);
+  const auto s_count      = KE::distance(s_first, s_last);
+  KOKKOS_EXPECTS(num_elements >= s_count);
+  (void)s_count;  // needed when macro above is a no-op
+
+  if (s_first == s_last) {
+    return first;
+  }
+
+  if (first == last) {
+    return last;
+  }
+
+  // special case where the two ranges have equal size:
+  // the only possible match is the whole range, so search reduces
+  // to an element-wise equality check
+  if (num_elements == s_count) {
+    const auto equal_result = equal_impl(label, ex, first, last, s_first, pred);
+    return (equal_result) ? first : last;
+  } else {
+    using index_type           = typename IteratorType1::difference_type;
+    using reducer_type         = FirstLoc<index_type>;
+    using reduction_value_type = typename reducer_type::value_type;
+    using func_t = StdSearchFunctor<index_type, IteratorType1, IteratorType2,
+                                    reducer_type, BinaryPredicateType>;
+
+    // run
+    reduction_value_type red_result;
+    reducer_type reducer(red_result);
+
+    // decide the size of the range policy of the par_reduce:
+    // the last feasible index at which a match can start is the one
+    // whose distance from "last" equals the sequence count;
+    // the +1 is needed to include that location as well.
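+    // e.g. (illustration only): num_elements = 10 and s_count = 3 give
+    // range_size = 8, i.e. candidate start indices 0..7, where 7 is the
+    // last index from which a full 3-element window fits in the range.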
+    const auto range_size = num_elements - s_count + 1;
+
+    // run par reduce
+    ::Kokkos::parallel_reduce(
+        label, RangePolicy<ExecutionSpace>(ex, 0, range_size),
+        func_t(first, last, s_first, s_last, reducer, pred), reducer);
+
+    // fence not needed because reducing into scalar
+
+    // decide and return
+    if (red_result.min_loc_true ==
+        ::Kokkos::reduction_identity<index_type>::min()) {
+      // location has not been found
+      return last;
+    } else {
+      // location has been found
+      return first + red_result.min_loc_true;
+    }
+  }
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType1 search_impl(const std::string& label, const ExecutionSpace& ex,
+                          IteratorType1 first, IteratorType1 last,
+                          IteratorType2 s_first, IteratorType2 s_last) {
+  using value_type1    = typename IteratorType1::value_type;
+  using value_type2    = typename IteratorType2::value_type;
+  using predicate_type = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+  return search_impl(label, ex, first, last, s_first, s_last, predicate_type());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_SearchN.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_SearchN.hpp
new file mode 100644 (file)
index 0000000..cd8b394
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_SEARCH_N_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_SEARCH_N_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include "Kokkos_AllOfAnyOfNoneOf.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class IteratorType, class SizeType, class ValueType,
+          class ReducerType, class PredicateType>
+struct StdSearchNFunctor {
+  using red_value_type = typename ReducerType::value_type;
+
+  IteratorType m_first;
+  IteratorType m_last;
+  SizeType m_count;
+  ValueType m_value;
+  ReducerType m_reducer;
+  PredicateType m_p;
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, red_value_type& red_value) const {
+    namespace KE = ::Kokkos::Experimental;
+    auto myit    = m_first + i;
+    bool found   = true;
+
+    for (SizeType k = 0; k < m_count; ++k) {
+      // note that we add this EXPECT to check that we are in a valid
+      // range; strictly speaking it could be removed, because the
+      // guarantee that we do not go out of bounds is already provided
+      // at the calling site, where we size the par_reduce range.
+      KOKKOS_EXPECTS((myit + k) < m_last);
+
+      if (!m_p(myit[k], m_value)) {
+        found = false;
+        break;
+      }
+    }
+
+    const auto rv =
+        found ? red_value_type{i}
+              : red_value_type{::Kokkos::reduction_identity<IndexType>::min()};
+
+    m_reducer.join(red_value, rv);
+  }
+
+  KOKKOS_FUNCTION
+  StdSearchNFunctor(IteratorType first, IteratorType last, SizeType count,
+                    ValueType value, ReducerType reducer, PredicateType p)
+      : m_first(std::move(first)),
+        m_last(std::move(last)),
+        m_count(std::move(count)),
+        m_value(std::move(value)),
+        m_reducer(std::move(reducer)),
+        m_p(std::move(p)) {}
+};
+
+template <class ExecutionSpace, class IteratorType, class SizeType,
+          class ValueType, class BinaryPredicateType>
+IteratorType search_n_impl(const std::string& label, const ExecutionSpace& ex,
+                           IteratorType first, IteratorType last,
+                           SizeType count, const ValueType& value,
+                           const BinaryPredicateType& pred) {
+  // checks
+  static_assert_random_access_and_accessible(ex, first);
+  expect_valid_range(first, last);
+  KOKKOS_EXPECTS((std::ptrdiff_t)count >= 0);
+
+  // count should not be larger than the range [first, last)
+  namespace KE            = ::Kokkos::Experimental;
+  const auto num_elements = KE::distance(first, last);
+  // cast things to avoid compiler warning
+  KOKKOS_EXPECTS((std::size_t)num_elements >= (std::size_t)count);
+
+  if (first == last) {
+    return first;
+  }
+
+  // special case where the number of elements in [first, last)
+  // equals count: the only candidate is the whole range
+  if ((std::size_t)num_elements == (std::size_t)count) {
+    using equal_to_value = StdAlgoEqualsValUnaryPredicate<ValueType>;
+    const auto satisfies =
+        all_of_impl(label, ex, first, last, equal_to_value(value));
+    return (satisfies) ? first : last;
+  } else {
+    // aliases
+    using index_type           = typename IteratorType::difference_type;
+    using reducer_type         = FirstLoc<index_type>;
+    using reduction_value_type = typename reducer_type::value_type;
+    using func_t =
+        StdSearchNFunctor<index_type, IteratorType, SizeType, ValueType,
+                          reducer_type, BinaryPredicateType>;
+
+    // run
+    reduction_value_type red_result;
+    reducer_type reducer(red_result);
+
+    // decide the size of the range policy of the par_reduce:
+    // the last feasible index at which a match can start is the one
+    // whose distance from "last" equals count;
+    // the +1 is needed to include that location as well.
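+    // e.g. (illustration only): num_elements = 10 and count = 3 give
+    // range_size = 8, i.e. candidate start indices 0..7.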
+    const auto range_size = num_elements - count + 1;
+
+    // run par reduce
+    ::Kokkos::parallel_reduce(
+        label, RangePolicy<ExecutionSpace>(ex, 0, range_size),
+        func_t(first, last, count, value, reducer, pred), reducer);
+
+    // fence not needed because reducing into scalar
+
+    // decide and return
+    if (red_result.min_loc_true ==
+        ::Kokkos::reduction_identity<index_type>::min()) {
+      // location has not been found
+      return last;
+    } else {
+      // location has been found
+      return first + red_result.min_loc_true;
+    }
+  }
+}
+
+template <class ExecutionSpace, class IteratorType, class SizeType,
+          class ValueType>
+IteratorType search_n_impl(const std::string& label, const ExecutionSpace& ex,
+                           IteratorType first, IteratorType last,
+                           SizeType count, const ValueType& value) {
+  using iter_value_type = typename IteratorType::value_type;
+  using predicate_type =
+      StdAlgoEqualBinaryPredicate<iter_value_type, ValueType>;
+
+  /* above we use <iter_value_type, ValueType> for the predicate_type
+     to be consistent with the standard, which says:
+
+     "
+     The signature of the predicate function should be equivalent to:
+
+        bool pred(const Type1 &a, const Type2 &b);
+
+     The type Type1 must be such that an object of type ForwardIt can be
+     dereferenced and then implicitly converted to Type1. The type Type2 must be
+     such that an object of type T can be implicitly converted to Type2.
+     "
+
+     In our case, IteratorType = ForwardIt, and ValueType = T.
+   */
+
+  return search_n_impl(label, ex, first, last, count, value, predicate_type());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ShiftLeft.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ShiftLeft.hpp
new file mode 100644 (file)
index 0000000..7968644
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_SHIFT_LEFT_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_SHIFT_LEFT_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Move.hpp>
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType shift_left_impl(const std::string& label, const ExecutionSpace& ex,
+                             IteratorType first, IteratorType last,
+                             typename IteratorType::difference_type n) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+  KOKKOS_EXPECTS(n >= 0);
+
+  // handle trivial cases
+  if (n == 0) {
+    return last;
+  }
+
+  if (n >= Kokkos::Experimental::distance(first, last)) {
+    return first;
+  }
+
+  /*
+    Suppose that n = 5, and our [first,last) spans:
+
+    | 0  | 1  |  2 | 1  | 2  | 1  | 2  | 2  | 10 | -3 | 1  | -6 | *
+      ^                                                          ^
+    first                                                       last
+
+    shift_left modifies the range such that we have this data:
+    | 1  | 2  | 2  | 10  | -3 | 1  | -6 | x | x  | x  | x  |  x | *
+                                          ^
+                                   return it pointing here
+
+
+    and returns an iterator pointing to one past the new end.
+    Note: elements marked x are in an unspecified state because they
+    have been moved from.
+
+    We implement this in two steps:
+    step 1:
+      we create a temporary view with extent = distance(first+n, last)
+      and *move* assign the elements from [first+n, last) to tmp view, such that
+      tmp view becomes:
+
+      | 1  | 2  | 2  | 10  | -3 | 1  | -6 |
+
+    step 2:
+      move elements of tmp view back to range starting at first.
+   */
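+
+  /*
+    In the example above, n = 5 and the range has 12 elements, so
+    num_elements_to_move = distance(first + n, last) = 7, and the
+    returned iterator last - n equals first + 7, one past the new end.
+   */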
+
+  const auto num_elements_to_move =
+      ::Kokkos::Experimental::distance(first + n, last);
+
+  // create tmp view
+  using value_type    = typename IteratorType::value_type;
+  using tmp_view_type = Kokkos::View<value_type*, ExecutionSpace>;
+  tmp_view_type tmp_view("shift_left_impl", num_elements_to_move);
+  using tmp_readwrite_iterator_type = decltype(begin(tmp_view));
+
+  using index_type = typename IteratorType::difference_type;
+
+  // step 1
+  using step1_func_type =
+      StdMoveFunctor<index_type, IteratorType, tmp_readwrite_iterator_type>;
+  ::Kokkos::parallel_for(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_to_move),
+      step1_func_type(first + n, begin(tmp_view)));
+
+  // step 2
+  using step2_func_type =
+      StdMoveFunctor<index_type, tmp_readwrite_iterator_type, IteratorType>;
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, tmp_view.extent(0)),
+                         step2_func_type(begin(tmp_view), first));
+
+  ex.fence("Kokkos::shift_left: fence after operation");
+
+  return last - n;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ShiftRight.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ShiftRight.hpp
new file mode 100644 (file)
index 0000000..0ffde42
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_SHIFT_RIGHT_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_SHIFT_RIGHT_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Move.hpp>
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType shift_right_impl(const std::string& label,
+                              const ExecutionSpace& ex, IteratorType first,
+                              IteratorType last,
+                              typename IteratorType::difference_type n) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+  KOKKOS_EXPECTS(n >= 0);
+
+  // handle trivial cases
+  if (n == 0) {
+    return first;
+  }
+
+  if (n >= Kokkos::Experimental::distance(first, last)) {
+    return last;
+  }
+
+  /*
+    Suppose that n = 3, and [first,last) spans:
+
+    | 0  | 1  |  2 | 1  | 2  | 1  | 2  | 2  | 10 | -3 | 1  | -6 | *
+      ^                                                          ^
+    first                                                       last
+
+    shift_right modifies the range such that we have this data:
+    |  x | x  | x  | 0  | 1  |  2 | 1  | 2  | 1  | 2  | 2  | 10 | *
+                     ^
+             return it points here
+
+    and returns an iterator pointing to the new beginning.
+    Note: elements marked x are in an unspecified state because they
+    have been moved from.
+
+    We implement this in two steps:
+    step 1:
+      we create a temporary view with extent = distance(first, last-n)
+      and *move* assign the elements from [first, last-n) to tmp view, such that
+      tmp view becomes:
+
+      | 0  | 1  |  2 | 1  | 2  | 1  | 2  | 2  | 10 |
+
+    step 2:
+      move elements of tmp view back to range starting at first+n.
+   */
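+
+  /*
+    In the example above, n = 3 and the range has 12 elements, so
+    num_elements_to_move = distance(first, last - n) = 9, and the
+    returned iterator is first + n, the new beginning of the data.
+   */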
+
+  const auto num_elements_to_move =
+      ::Kokkos::Experimental::distance(first, last - n);
+
+  // create tmp view
+  using value_type    = typename IteratorType::value_type;
+  using tmp_view_type = Kokkos::View<value_type*, ExecutionSpace>;
+  tmp_view_type tmp_view("shift_right_impl", num_elements_to_move);
+  using tmp_readwrite_iterator_type = decltype(begin(tmp_view));
+
+  using index_type = typename IteratorType::difference_type;
+
+  // step 1
+  using step1_func_type =
+      StdMoveFunctor<index_type, IteratorType, tmp_readwrite_iterator_type>;
+  ::Kokkos::parallel_for(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_to_move),
+      step1_func_type(first, begin(tmp_view)));
+
+  // step 2
+  using step2_func_type =
+      StdMoveFunctor<index_type, tmp_readwrite_iterator_type, IteratorType>;
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, tmp_view.extent(0)),
+                         step2_func_type(begin(tmp_view), first + n));
+
+  ex.fence("Kokkos::shift_right: fence after operation");
+
+  return first + n;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_SwapRanges.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_SwapRanges.hpp
new file mode 100644 (file)
index 0000000..3e6ca14
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_SWAP_RANGES_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_SWAP_RANGES_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <std_algorithms/Kokkos_Swap.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class IteratorType1, class IteratorType2>
+struct StdSwapRangesFunctor {
+  IteratorType1 m_first1;
+  IteratorType2 m_first2;
+
+  KOKKOS_FUNCTION
+  void operator()(IndexType i) const {
+    // the swap below does the same thing, but it does not
+    // work with Intel 18.0.5; putting the implementation
+    // directly here does work.
+#ifdef KOKKOS_COMPILER_INTEL
+    typename IteratorType1::value_type tmp = std::move(m_first1[i]);
+    m_first1[i]                            = std::move(m_first2[i]);
+    m_first2[i]                            = std::move(tmp);
+#else
+    ::Kokkos::Experimental::swap(m_first1[i], m_first2[i]);
+#endif
+  }
+
+  KOKKOS_FUNCTION
+  StdSwapRangesFunctor(IteratorType1 _first1, IteratorType2 _first2)
+      : m_first1(std::move(_first1)), m_first2(std::move(_first2)) {}
+};
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2>
+IteratorType2 swap_ranges_impl(const std::string& label,
+                               const ExecutionSpace& ex, IteratorType1 first1,
+                               IteratorType1 last1, IteratorType2 first2) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first1, first2);
+  Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+  Impl::expect_valid_range(first1, last1);
+
+  // aliases
+  using index_type = typename IteratorType1::difference_type;
+  using func_t = StdSwapRangesFunctor<index_type, IteratorType1, IteratorType2>;
+
+  // run
+  const auto num_elements_to_swap =
+      Kokkos::Experimental::distance(first1, last1);
+  ::Kokkos::parallel_for(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements_to_swap),
+      func_t(first1, first2));
+  ex.fence("Kokkos::swap_ranges: fence after operation");
+
+  // return
+  return first2 + num_elements_to_swap;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Transform.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Transform.hpp
new file mode 100644 (file)
index 0000000..5d2c0cc
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class InputIterator, class OutputIterator,
+          class UnaryFunctorType>
+struct StdTransformFunctor {
+  InputIterator m_first;
+  OutputIterator m_d_first;
+  UnaryFunctorType m_unary_op;
+
+  KOKKOS_FUNCTION
+  void operator()(IndexType i) const { m_d_first[i] = m_unary_op(m_first[i]); }
+
+  KOKKOS_FUNCTION
+  StdTransformFunctor(InputIterator _first, OutputIterator _m_d_first,
+                      UnaryFunctorType _functor)
+      : m_first(std::move(_first)),
+        m_d_first(std::move(_m_d_first)),
+        m_unary_op(std::move(_functor)) {}
+};
+
+template <class IndexType, class InputIterator1, class InputIterator2,
+          class OutputIterator, class BinaryFunctorType>
+struct StdTransformBinaryFunctor {
+  InputIterator1 m_first1;
+  InputIterator2 m_first2;
+  OutputIterator m_d_first;
+  BinaryFunctorType m_binary_op;
+
+  KOKKOS_FUNCTION
+  void operator()(IndexType i) const {
+    m_d_first[i] = m_binary_op(m_first1[i], m_first2[i]);
+  }
+
+  KOKKOS_FUNCTION
+  StdTransformBinaryFunctor(InputIterator1 _first1, InputIterator2 _first2,
+                            OutputIterator _m_d_first,
+                            BinaryFunctorType _functor)
+      : m_first1(std::move(_first1)),
+        m_first2(std::move(_first2)),
+        m_d_first(std::move(_m_d_first)),
+        m_binary_op(std::move(_functor)) {}
+};
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class UnaryOperation>
+OutputIterator transform_impl(const std::string& label,
+                              const ExecutionSpace& ex, InputIterator first1,
+                              InputIterator last1, OutputIterator d_first,
+                              UnaryOperation unary_op) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first1, d_first);
+  Impl::static_assert_iterators_have_matching_difference_type(first1, d_first);
+  Impl::expect_valid_range(first1, last1);
+
+  // aliases
+  using index_type = typename InputIterator::difference_type;
+  using func_t = StdTransformFunctor<index_type, InputIterator, OutputIterator,
+                                     UnaryOperation>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first1, last1);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         func_t(first1, d_first, unary_op));
+  ex.fence("Kokkos::transform: fence after operation");
+
+  // return
+  return d_first + num_elements;
+}
+
+template <class ExecutionSpace, class InputIterator1, class InputIterator2,
+          class OutputIterator, class BinaryOperation>
+OutputIterator transform_impl(const std::string& label,
+                              const ExecutionSpace& ex, InputIterator1 first1,
+                              InputIterator1 last1, InputIterator2 first2,
+                              OutputIterator d_first,
+                              BinaryOperation binary_op) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first1, first2, d_first);
+  Impl::static_assert_iterators_have_matching_difference_type(first1, first2,
+                                                              d_first);
+  Impl::expect_valid_range(first1, last1);
+
+  // aliases
+  using index_type = typename InputIterator1::difference_type;
+  using func_t =
+      StdTransformBinaryFunctor<index_type, InputIterator1, InputIterator2,
+                                OutputIterator, BinaryOperation>;
+
+  // run
+  const auto num_elements = Kokkos::Experimental::distance(first1, last1);
+  ::Kokkos::parallel_for(label,
+                         RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                         func_t(first1, first2, d_first, binary_op));
+  ex.fence("Kokkos::transform: fence after operation");
+  return d_first + num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_TransformExclusiveScan.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_TransformExclusiveScan.hpp
new file mode 100644 (file)
index 0000000..9fb8cbc
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_EXCLUSIVE_SCAN_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_EXCLUSIVE_SCAN_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include "Kokkos_ValueWrapperForNoNeutralElement.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+          class FirstDest, class BinaryOpType, class UnaryOpType>
+struct TransformExclusiveScanFunctor {
+  using execution_space = ExeSpace;
+  using value_type =
+      ::Kokkos::Experimental::Impl::ValueWrapperForNoNeutralElement<ValueType>;
+
+  ValueType m_init_value;
+  FirstFrom m_first_from;
+  FirstDest m_first_dest;
+  BinaryOpType m_binary_op;
+  UnaryOpType m_unary_op;
+
+  KOKKOS_FUNCTION
+  TransformExclusiveScanFunctor(ValueType init, FirstFrom first_from,
+                                FirstDest first_dest, BinaryOpType bop,
+                                UnaryOpType uop)
+      : m_init_value(std::move(init)),
+        m_first_from(std::move(first_from)),
+        m_first_dest(std::move(first_dest)),
+        m_binary_op(std::move(bop)),
+        m_unary_op(std::move(uop)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, value_type& update,
+                  const bool final_pass) const {
+    if (final_pass) {
+      if (i == 0) {
+        // for both ExclusiveScan and TransformExclusiveScan,
+        // init is unmodified
+        m_first_dest[i] = m_init_value;
+      } else {
+        m_first_dest[i] = m_binary_op(update.val, m_init_value);
+      }
+    }
+
+    const auto tmp = value_type{m_unary_op(m_first_from[i]), false};
+    this->join(update, tmp);
+  }
+
+  KOKKOS_FUNCTION
+  void init(value_type& update) const {
+    update.val        = {};
+    update.is_initial = true;
+  }
+
+  KOKKOS_FUNCTION
+  void join(value_type& update, const value_type& input) const {
+    if (update.is_initial) {
+      update.val = input.val;
+    } else {
+      update.val = m_binary_op(update.val, input.val);
+    }
+    update.is_initial = false;
+  }
+};
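+
+// Worked example (illustration only): for input {1, 2, 3} with unary
+// op = identity, binary op = plus, and init = 10, the final pass
+// writes dest = {10, 11, 13}: dest[0] is init unmodified, and for
+// i > 0, dest[i] = binary_op(sum of transformed inputs in [0, i), init).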
+
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class ValueType, class BinaryOpType,
+          class UnaryOpType>
+OutputIteratorType transform_exclusive_scan_impl(
+    const std::string& label, const ExecutionSpace& ex,
+    InputIteratorType first_from, InputIteratorType last_from,
+    OutputIteratorType first_dest, ValueType init_value, BinaryOpType bop,
+    UnaryOpType uop) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+  Impl::static_assert_iterators_have_matching_difference_type(first_from,
+                                                              first_dest);
+  Impl::expect_valid_range(first_from, last_from);
+
+  // aliases
+  using index_type = typename InputIteratorType::difference_type;
+  using func_type =
+      TransformExclusiveScanFunctor<ExecutionSpace, index_type, ValueType,
+                                    InputIteratorType, OutputIteratorType,
+                                    BinaryOpType, UnaryOpType>;
+
+  // run
+  const auto num_elements =
+      Kokkos::Experimental::distance(first_from, last_from);
+  ::Kokkos::parallel_scan(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+      func_type(init_value, first_from, first_dest, bop, uop));
+  ex.fence("Kokkos::transform_exclusive_scan: fence after operation");
+
+  // return
+  return first_dest + num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_TransformInclusiveScan.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_TransformInclusiveScan.hpp
new file mode 100644 (file)
index 0000000..281eb6f
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_INCLUSIVE_SCAN_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_INCLUSIVE_SCAN_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include "Kokkos_ValueWrapperForNoNeutralElement.hpp"
+#include "Kokkos_IdentityReferenceUnaryFunctor.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+          class FirstDest, class BinaryOpType, class UnaryOpType>
+struct TransformInclusiveScanNoInitValueFunctor {
+  using execution_space = ExeSpace;
+  using value_type      = ValueWrapperForNoNeutralElement<ValueType>;
+
+  FirstFrom m_first_from;
+  FirstDest m_first_dest;
+  BinaryOpType m_binary_op;
+  UnaryOpType m_unary_op;
+
+  KOKKOS_FUNCTION
+  TransformInclusiveScanNoInitValueFunctor(FirstFrom first_from,
+                                           FirstDest first_dest,
+                                           BinaryOpType bop, UnaryOpType uop)
+      : m_first_from(std::move(first_from)),
+        m_first_dest(std::move(first_dest)),
+        m_binary_op(std::move(bop)),
+        m_unary_op(std::move(uop)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, value_type& update,
+                  const bool final_pass) const {
+    const auto tmp = value_type{m_unary_op(m_first_from[i]), false};
+    this->join(update, tmp);
+    if (final_pass) {
+      m_first_dest[i] = update.val;
+    }
+  }
+
+  KOKKOS_FUNCTION
+  void init(value_type& update) const {
+    update.val        = {};
+    update.is_initial = true;
+  }
+
+  KOKKOS_FUNCTION
+  void join(value_type& update, const value_type& input) const {
+    if (update.is_initial) {
+      update.val = input.val;
+    } else {
+      update.val = m_binary_op(update.val, input.val);
+    }
+    update.is_initial = false;
+  }
+};
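+
+// Worked example (illustration only): for input {1, 2, 3} with unary
+// op = identity and binary op = plus, the final pass writes the
+// inclusive prefix sums dest = {1, 3, 6}.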
+
+template <class ExeSpace, class IndexType, class ValueType, class FirstFrom,
+          class FirstDest, class BinaryOpType, class UnaryOpType>
+struct TransformInclusiveScanWithInitValueFunctor {
+  using execution_space = ExeSpace;
+  using value_type      = ValueWrapperForNoNeutralElement<ValueType>;
+
+  FirstFrom m_first_from;
+  FirstDest m_first_dest;
+  BinaryOpType m_binary_op;
+  UnaryOpType m_unary_op;
+  ValueType m_init;
+
+  KOKKOS_FUNCTION
+  TransformInclusiveScanWithInitValueFunctor(FirstFrom first_from,
+                                             FirstDest first_dest,
+                                             BinaryOpType bop, UnaryOpType uop,
+                                             ValueType init)
+      : m_first_from(std::move(first_from)),
+        m_first_dest(std::move(first_dest)),
+        m_binary_op(std::move(bop)),
+        m_unary_op(std::move(uop)),
+        m_init(std::move(init)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, value_type& update,
+                  const bool final_pass) const {
+    const auto tmp = value_type{m_unary_op(m_first_from[i]), false};
+    this->join(update, tmp);
+
+    if (final_pass) {
+      m_first_dest[i] = m_binary_op(update.val, m_init);
+    }
+  }
+
+  KOKKOS_FUNCTION
+  void init(value_type& update) const {
+    update.val        = {};
+    update.is_initial = true;
+  }
+
+  KOKKOS_FUNCTION
+  void join(value_type& update, const value_type& input) const {
+    if (update.is_initial) {
+      update.val = input.val;
+    } else {
+      update.val = m_binary_op(update.val, input.val);
+    }
+    update.is_initial = false;
+  }
+};
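+
+// Worked example (illustration only): as above but with init = 10,
+// each final value is binary_op(prefix, init), so dest = {11, 13, 16}.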
+
+// -------------------------------------------------------------
+// transform_inclusive_scan_impl without init_value
+// -------------------------------------------------------------
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOpType, class UnaryOpType>
+OutputIteratorType transform_inclusive_scan_impl(const std::string& label,
+                                                 const ExecutionSpace& ex,
+                                                 InputIteratorType first_from,
+                                                 InputIteratorType last_from,
+                                                 OutputIteratorType first_dest,
+                                                 BinaryOpType binary_op,
+                                                 UnaryOpType unary_op) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+  Impl::static_assert_iterators_have_matching_difference_type(first_from,
+                                                              first_dest);
+  Impl::expect_valid_range(first_from, last_from);
+
+  // aliases
+  using index_type = typename InputIteratorType::difference_type;
+  using value_type =
+      std::remove_const_t<typename InputIteratorType::value_type>;
+  using func_type = TransformInclusiveScanNoInitValueFunctor<
+      ExecutionSpace, index_type, value_type, InputIteratorType,
+      OutputIteratorType, BinaryOpType, UnaryOpType>;
+
+  // run
+  const auto num_elements =
+      Kokkos::Experimental::distance(first_from, last_from);
+  ::Kokkos::parallel_scan(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+      func_type(first_from, first_dest, binary_op, unary_op));
+  ex.fence("Kokkos::transform_inclusive_scan: fence after operation");
+
+  // return
+  return first_dest + num_elements;
+}
+
+// -------------------------------------------------------------
+// transform_inclusive_scan_impl with init_value
+// -------------------------------------------------------------
+template <class ExecutionSpace, class InputIteratorType,
+          class OutputIteratorType, class BinaryOpType, class UnaryOpType,
+          class ValueType>
+OutputIteratorType transform_inclusive_scan_impl(
+    const std::string& label, const ExecutionSpace& ex,
+    InputIteratorType first_from, InputIteratorType last_from,
+    OutputIteratorType first_dest, BinaryOpType binary_op, UnaryOpType unary_op,
+    ValueType init_value) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first_from, first_dest);
+  Impl::static_assert_iterators_have_matching_difference_type(first_from,
+                                                              first_dest);
+  Impl::expect_valid_range(first_from, last_from);
+
+  // aliases
+  using index_type = typename InputIteratorType::difference_type;
+  using func_type  = TransformInclusiveScanWithInitValueFunctor<
+      ExecutionSpace, index_type, ValueType, InputIteratorType,
+      OutputIteratorType, BinaryOpType, UnaryOpType>;
+
+  // run
+  const auto num_elements =
+      Kokkos::Experimental::distance(first_from, last_from);
+  ::Kokkos::parallel_scan(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+      func_type(first_from, first_dest, binary_op, unary_op, init_value));
+  ex.fence("Kokkos::transform_inclusive_scan: fence after operation");
+
+  // return
+  return first_dest + num_elements;
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
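
For orientation, a minimal sketch of how the two impl overloads above are reached
through the public Kokkos::Experimental::transform_inclusive_scan API (the sketch
is not part of the patch; view names, lambdas, and values are illustrative):

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void transform_inclusive_scan_example() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> in("in", 5), out("out", 5);
      Kokkos::deep_copy(in, 1);  // in = {1, 1, 1, 1, 1}

      // no init value: out[i] = 2*in[0] + ... + 2*in[i] -> {2, 4, 6, 8, 10}
      KE::transform_inclusive_scan(
          Kokkos::DefaultExecutionSpace(), KE::begin(in), KE::end(in),
          KE::begin(out),
          KOKKOS_LAMBDA(int a, int b) { return a + b; },  // binary join
          KOKKOS_LAMBDA(int v) { return 2 * v; });        // unary transform

      // with init value: routes to the *WithInitValueFunctor above, which
      // joins the init into every written prefix -> {102, 104, 106, ...}
      KE::transform_inclusive_scan(
          Kokkos::DefaultExecutionSpace(), KE::begin(in), KE::end(in),
          KE::begin(out),
          KOKKOS_LAMBDA(int a, int b) { return a + b; },
          KOKKOS_LAMBDA(int v) { return 2 * v; }, 100);
    }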
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_TransformReduce.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_TransformReduce.hpp
new file mode 100644 (file)
index 0000000..e3a780f
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_TRANSFORM_REDUCE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_TRANSFORM_REDUCE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class ValueType>
+struct StdTransformReduceDefaultBinaryTransformFunctor {
+  KOKKOS_FUNCTION
+  constexpr ValueType operator()(const ValueType& a, const ValueType& b) const {
+    return (a * b);
+  }
+};
+
+template <class ValueType>
+struct StdTransformReduceDefaultJoinFunctor {
+  KOKKOS_FUNCTION
+  constexpr ValueType operator()(const ValueType& a, const ValueType& b) const {
+    return a + b;
+  }
+};
+
+template <class IteratorType, class ReducerType, class TransformType>
+struct StdTransformReduceSingleIntervalFunctor {
+  using red_value_type = typename ReducerType::value_type;
+  using index_type     = typename IteratorType::difference_type;
+
+  const IteratorType m_first;
+  const ReducerType m_reducer;
+  const TransformType m_transform;
+
+  KOKKOS_FUNCTION
+  void operator()(const index_type i, red_value_type& red_value) const {
+    auto tmp_wrapped_value = red_value_type{m_transform(m_first[i]), false};
+    if (red_value.is_initial) {
+      red_value = tmp_wrapped_value;
+    } else {
+      m_reducer.join(red_value, tmp_wrapped_value);
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdTransformReduceSingleIntervalFunctor(IteratorType first,
+                                          ReducerType reducer,
+                                          TransformType transform)
+      : m_first(std::move(first)),
+        m_reducer(std::move(reducer)),
+        m_transform(std::move(transform)) {}
+};
+
+template <class IndexType, class IteratorType1, class IteratorType2,
+          class ReducerType, class TransformType>
+struct StdTransformReduceTwoIntervalsFunctor {
+  using red_value_type = typename ReducerType::value_type;
+
+  const IteratorType1 m_first1;
+  const IteratorType2 m_first2;
+  const ReducerType m_reducer;
+  const TransformType m_transform;
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, red_value_type& red_value) const {
+    auto tmp_wrapped_value =
+        red_value_type{m_transform(m_first1[i], m_first2[i]), false};
+
+    if (red_value.is_initial) {
+      red_value = tmp_wrapped_value;
+    } else {
+      m_reducer.join(red_value, tmp_wrapped_value);
+    }
+  }
+
+  KOKKOS_FUNCTION
+  StdTransformReduceTwoIntervalsFunctor(IteratorType1 first1,
+                                        IteratorType2 first2,
+                                        ReducerType reducer,
+                                        TransformType transform)
+      : m_first1(std::move(first1)),
+        m_first2(std::move(first2)),
+        m_reducer(std::move(reducer)),
+        m_transform(std::move(transform)) {}
+};
+
+//------------------------------
+//
+// impl functions
+//
+//------------------------------
+
+template <class ExecutionSpace, class IteratorType, class ValueType,
+          class JoinerType, class UnaryTransformerType>
+ValueType transform_reduce_custom_functors_impl(
+    const std::string& label, const ExecutionSpace& ex, IteratorType first,
+    IteratorType last, ValueType init_reduction_value, JoinerType joiner,
+    UnaryTransformerType transformer) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::expect_valid_range(first, last);
+
+  if (first == last) {
+    // init is returned, unmodified
+    return init_reduction_value;
+  }
+
+  // aliases
+  using reducer_type =
+      ReducerWithArbitraryJoinerNoNeutralElement<ValueType, JoinerType>;
+  using functor_type =
+      StdTransformReduceSingleIntervalFunctor<IteratorType, reducer_type,
+                                              UnaryTransformerType>;
+  using reduction_value_type = typename reducer_type::value_type;
+
+  // run
+  reduction_value_type result;
+  reducer_type reducer(result, joiner);
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  ::Kokkos::parallel_reduce(label,
+                            RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+                            functor_type(first, reducer, transformer), reducer);
+
+  // fence not needed since reducing into scalar
+
+  // as per standard, transform is not applied to the init value
+  // https://en.cppreference.com/w/cpp/algorithm/transform_reduce
+  return joiner(result.val, init_reduction_value);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class ValueType, class JoinerType, class BinaryTransformerType>
+ValueType transform_reduce_custom_functors_impl(
+    const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+    IteratorType1 last1, IteratorType2 first2, ValueType init_reduction_value,
+    JoinerType joiner, BinaryTransformerType transformer) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first1, first2);
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+  Impl::expect_valid_range(first1, last1);
+
+  if (first1 == last1) {
+    // init is returned, unmodified
+    return init_reduction_value;
+  }
+
+  // aliases
+  using index_type = typename IteratorType1::difference_type;
+  using reducer_type =
+      ReducerWithArbitraryJoinerNoNeutralElement<ValueType, JoinerType>;
+  using functor_type =
+      StdTransformReduceTwoIntervalsFunctor<index_type, IteratorType1,
+                                            IteratorType2, reducer_type,
+                                            BinaryTransformerType>;
+  using reduction_value_type = typename reducer_type::value_type;
+
+  // run
+  reduction_value_type result;
+  reducer_type reducer(result, joiner);
+
+  const auto num_elements = Kokkos::Experimental::distance(first1, last1);
+  ::Kokkos::parallel_reduce(
+      label, RangePolicy<ExecutionSpace>(ex, 0, num_elements),
+      functor_type(first1, first2, reducer, transformer), reducer);
+
+  // fence not needed since reducing into scalar
+  return joiner(result.val, init_reduction_value);
+}
+
+template <class ExecutionSpace, class IteratorType1, class IteratorType2,
+          class ValueType>
+ValueType transform_reduce_default_functors_impl(
+    const std::string& label, const ExecutionSpace& ex, IteratorType1 first1,
+    IteratorType1 last1, IteratorType2 first2, ValueType init_reduction_value) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first1, first2);
+  Impl::static_assert_is_not_openmptarget(ex);
+  Impl::static_assert_iterators_have_matching_difference_type(first1, first2);
+  Impl::expect_valid_range(first1, last1);
+
+  // aliases
+  using transformer_type =
+      Impl::StdTransformReduceDefaultBinaryTransformFunctor<ValueType>;
+  using joiner_type = Impl::StdTransformReduceDefaultJoinFunctor<ValueType>;
+
+  return transform_reduce_custom_functors_impl(
+      label, ex, first1, last1, first2, std::move(init_reduction_value),
+      joiner_type(), transformer_type());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
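
A short usage sketch (not part of the patch) for the default-functor path above:
with the default transform (element-wise product) and join (sum), transform_reduce
computes an inner product; names and values are illustrative:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    double inner_product_example() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<double*> a("a", 100), b("b", 100);
      Kokkos::deep_copy(a, 2.0);
      Kokkos::deep_copy(b, 3.0);

      // default functors: transform = a[i] * b[i], join = "+", so this is
      // 0.0 + sum_i a[i] * b[i] = 600.0; as noted in the impl above, the
      // transform is never applied to the init value.
      return KE::transform_reduce(Kokkos::DefaultExecutionSpace(),
                                  KE::begin(a), KE::end(a), KE::begin(b),
                                  0.0);
    }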
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Unique.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_Unique.hpp
new file mode 100644 (file)
index 0000000..5e4ea7d
--- /dev/null
@@ -0,0 +1,193 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_UNIQUE_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_UNIQUE_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include <std_algorithms/Kokkos_Move.hpp>
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <std_algorithms/Kokkos_AdjacentFind.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class InputIt, class OutputIt,
+          class BinaryPredicateType>
+struct StdUniqueFunctor {
+  InputIt m_first_from;
+  InputIt m_last_from;
+  OutputIt m_first_dest;
+  BinaryPredicateType m_pred;
+
+  KOKKOS_FUNCTION
+  StdUniqueFunctor(InputIt first_from, InputIt last_from, OutputIt first_dest,
+                   BinaryPredicateType pred)
+      : m_first_from(std::move(first_from)),
+        m_last_from(std::move(last_from)),
+        m_first_dest(std::move(first_dest)),
+        m_pred(std::move(pred)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, IndexType& update,
+                  const bool final_pass) const {
+    auto& val_i         = m_first_from[i];
+    const auto& val_ip1 = m_first_from[i + 1];
+
+    if (final_pass) {
+      if (!m_pred(val_i, val_ip1)) {
+        m_first_dest[update] = std::move(val_i);
+      }
+    }
+
+    if (!m_pred(val_i, val_ip1)) {
+      update += 1;
+    }
+  }
+};
+
+template <class ExecutionSpace, class IteratorType, class PredicateType>
+IteratorType unique_impl(const std::string& label, const ExecutionSpace& ex,
+                         IteratorType first, IteratorType last,
+                         PredicateType pred) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first);
+  Impl::expect_valid_range(first, last);
+
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  if (num_elements == 0) {
+    return first;
+  } else if (num_elements == 1) {
+    return last;
+  } else {
+    // ----------
+    // step 1:
+    // find first location of adjacent equal elements
+    // ----------
+    auto it_found =
+        ::Kokkos::Experimental::adjacent_find(ex, first, last, pred);
+
+    // if none, all elements are unique, so nothing to do
+    if (it_found == last) {
+      return last;
+    } else {
+      // if here, we found some equal adjacent elements,
+      // so count all preceding unique elements
+      const auto num_unique_found_in_step_one = it_found - first;
+
+      // ----------
+      // step 2:
+      // ----------
+      // since we found some unique elements, we don't need to explore
+      // the full range [first, last), but only need to focus on the
+      // remaining range [it_found, last)
+      const auto num_elements_to_explore = last - it_found;
+
+      // create a tmp view into which we *move* all unique elements,
+      // using the same algorithm as unique_copy, except that we move elements
+      using value_type    = typename IteratorType::value_type;
+      using tmp_view_type = Kokkos::View<value_type*, ExecutionSpace>;
+      tmp_view_type tmp_view("std_unique_tmp_view", num_elements_to_explore);
+
+      // scan extent is: num_elements_to_explore - 1
+      // for same reason as the one explained in unique_copy
+      const auto scan_size = num_elements_to_explore - 1;
+      auto tmp_first       = ::Kokkos::Experimental::begin(tmp_view);
+      using output_it      = decltype(tmp_first);
+
+      using index_type = typename IteratorType::difference_type;
+      using func_type =
+          StdUniqueFunctor<index_type, IteratorType, output_it, PredicateType>;
+      index_type count = 0;
+      ::Kokkos::parallel_scan(
+          label, RangePolicy<ExecutionSpace>(ex, 0, scan_size),
+          func_type(it_found, last, tmp_first, pred), count);
+
+      // move the last element too, for the same reason as in unique_copy
+      auto unused_r =
+          Impl::move_impl("Kokkos::move_from_unique", ex, it_found + scan_size,
+                          last, tmp_first + count);
+      (void)unused_r;  // suppress unused-variable warnings
+
+      // ----------
+      // step 3
+      // ----------
+      // move back from tmp to original range,
+      // ensuring we start overwriting after the original unique found
+      using tmp_readwrite_iterator_type = decltype(begin(tmp_view));
+      using step3_func_t =
+          StdMoveFunctor<index_type, tmp_readwrite_iterator_type, IteratorType>;
+
+      ::Kokkos::parallel_for(
+          "unique_step3_parfor",
+          RangePolicy<ExecutionSpace>(ex, 0, tmp_view.extent(0)),
+          step3_func_t(begin(tmp_view),
+                       (first + num_unique_found_in_step_one)));
+
+      ex.fence("Kokkos::unique: fence after operation");
+
+      // return iterator to one past the last element written
+      // (the +1 is needed to account for the last element, see above)
+      return (first + num_unique_found_in_step_one + count + 1);
+    }
+  }
+}
+
+template <class ExecutionSpace, class IteratorType>
+IteratorType unique_impl(const std::string& label, const ExecutionSpace& ex,
+                         IteratorType first, IteratorType last) {
+  using value_type    = typename IteratorType::value_type;
+  using binary_pred_t = StdAlgoEqualBinaryPredicate<value_type>;
+  return unique_impl(label, ex, first, last, binary_pred_t());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
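
A brief sketch (not part of the patch) of the public entry point served by
unique_impl; the view name and contents are illustrative:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void unique_example() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> v("v", 6);
      // suppose v = {1, 1, 2, 3, 3, 3}, filled on the device

      // steps 1-3 above collapse each run of equal adjacent elements to a
      // single element in place; afterwards v starts with {1, 2, 3} and
      // new_last points one past the last unique element, so
      // KE::distance(KE::begin(v), new_last) == 3.
      auto new_last = KE::unique(Kokkos::DefaultExecutionSpace(),
                                 KE::begin(v), KE::end(v));
      (void)new_last;
    }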
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_UniqueCopy.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_UniqueCopy.hpp
new file mode 100644 (file)
index 0000000..e4fd6f3
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_UNIQUE_COPY_IMPL_HPP
+#define KOKKOS_STD_ALGORITHMS_UNIQUE_COPY_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include "Kokkos_Constraints.hpp"
+#include "Kokkos_HelperPredicates.hpp"
+#include "Kokkos_CopyCopyN.hpp"
+#include <std_algorithms/Kokkos_Distance.hpp>
+#include <string>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+template <class IndexType, class InputIt, class OutputIt,
+          class BinaryPredicateType>
+struct StdUniqueCopyFunctor {
+  InputIt m_first_from;
+  InputIt m_last_from;
+  OutputIt m_first_dest;
+  BinaryPredicateType m_pred;
+
+  KOKKOS_FUNCTION
+  StdUniqueCopyFunctor(InputIt first_from, InputIt last_from,
+                       OutputIt first_dest, BinaryPredicateType pred)
+      : m_first_from(std::move(first_from)),
+        m_last_from(std::move(last_from)),
+        m_first_dest(std::move(first_dest)),
+        m_pred(std::move(pred)) {}
+
+  KOKKOS_FUNCTION
+  void operator()(const IndexType i, IndexType& update,
+                  const bool final_pass) const {
+    const auto& val_i   = m_first_from[i];
+    const auto& val_ip1 = m_first_from[i + 1];
+
+    if (final_pass) {
+      if (!m_pred(val_i, val_ip1)) {
+        m_first_dest[update] = val_i;
+      }
+    }
+
+    if (!m_pred(val_i, val_ip1)) {
+      update += 1;
+    }
+  }
+};
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator,
+          class PredicateType>
+OutputIterator unique_copy_impl(const std::string& label,
+                                const ExecutionSpace& ex, InputIterator first,
+                                InputIterator last, OutputIterator d_first,
+                                PredicateType pred) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first, d_first);
+  Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+  Impl::expect_valid_range(first, last);
+
+  // branch for the trivial vs. non-trivial case
+  const auto num_elements = Kokkos::Experimental::distance(first, last);
+  if (num_elements == 0) {
+    return d_first;
+  } else if (num_elements == 1) {
+    return Impl::copy_impl("Kokkos::copy_from_unique_copy", ex, first, last,
+                           d_first);
+  } else {
+    // aliases
+    using index_type = typename InputIterator::difference_type;
+    using func_type  = StdUniqueCopyFunctor<index_type, InputIterator,
+                                           OutputIterator, PredicateType>;
+
+    // note that we run the scan over num_elements - 1 entries: the functor
+    // at index i also reads element i + 1, and the last element always has
+    // to be written anyway. Rather than bounds-checking inside the functor,
+    // we run a "safe" scan over the first num_elements - 1 entries and then
+    // copy the last element separately.
+    const auto scan_size = num_elements - 1;
+    index_type count     = 0;
+    ::Kokkos::parallel_scan(label,
+                            RangePolicy<ExecutionSpace>(ex, 0, scan_size),
+                            func_type(first, last, d_first, pred), count);
+
+    return Impl::copy_impl("Kokkos::copy_from_unique_copy", ex,
+                           first + scan_size, last, d_first + count);
+  }
+}
+
+template <class ExecutionSpace, class InputIterator, class OutputIterator>
+OutputIterator unique_copy_impl(const std::string& label,
+                                const ExecutionSpace& ex, InputIterator first,
+                                InputIterator last, OutputIterator d_first) {
+  // checks
+  Impl::static_assert_random_access_and_accessible(ex, first, d_first);
+  Impl::static_assert_iterators_have_matching_difference_type(first, d_first);
+  Impl::expect_valid_range(first, last);
+
+  // aliases
+  using value_type1 = typename InputIterator::value_type;
+  using value_type2 = typename OutputIterator::value_type;
+
+  // default binary predicate uses ==
+  using binary_pred_t = StdAlgoEqualBinaryPredicate<value_type1, value_type2>;
+
+  // run
+  return unique_copy_impl(label, ex, first, last, d_first, binary_pred_t());
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
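
A worked sketch (not part of the patch) of unique_copy_impl's scan logic; names
and values are illustrative:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_StdAlgorithms.hpp>

    void unique_copy_example() {
      namespace KE = Kokkos::Experimental;
      Kokkos::View<int*> src("src", 5), dst("dst", 5);
      // suppose src = {4, 4, 7, 7, 9}

      // the scan runs over the first 4 elements and writes an element
      // whenever it differs from its successor (4 and 7 here, count == 2);
      // the trailing copy then appends the last element, 9. dst starts
      // with {4, 7, 9} and res points one past the last element written.
      auto res = KE::unique_copy(Kokkos::DefaultExecutionSpace(),
                                 KE::begin(src), KE::end(src),
                                 KE::begin(dst));
      (void)res;
    }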
diff --git a/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ValueWrapperForNoNeutralElement.hpp b/bundled/kokkos-3.7.00/algorithms/src/std_algorithms/impl/Kokkos_ValueWrapperForNoNeutralElement.hpp
new file mode 100644 (file)
index 0000000..da9b6ef
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STD_ALGORITHMS_VALUE_WRAPPER_FOR_NO_NEUTRAL_ELEMENT_HPP
+#define KOKKOS_STD_ALGORITHMS_VALUE_WRAPPER_FOR_NO_NEUTRAL_ELEMENT_HPP
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+//
+// scalar wrapper used for reductions and scans
+// when we don't have neutral element
+//
+template <class Scalar>
+struct ValueWrapperForNoNeutralElement {
+  Scalar val;
+  bool is_initial = true;
+
+  KOKKOS_FUNCTION
+  void operator=(const ValueWrapperForNoNeutralElement& rhs) {
+    val        = rhs.val;
+    is_initial = rhs.is_initial;
+  }
+};
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
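
The reason for this wrapper shows up in the join() implementations earlier in
this commit: a parallel reduction or scan normally seeds partial results with a
neutral element, but for an arbitrary user-supplied binary op no such identity
exists. A self-contained sketch of the pattern (hypothetical helper, not from
the patch):

    // the is_initial flag stands in for the missing neutral element
    template <class Scalar>
    struct Wrapped {
      Scalar val;
      bool is_initial = true;
    };

    template <class Scalar, class BinaryOp>
    void join_no_neutral(Wrapped<Scalar>& update, const Wrapped<Scalar>& input,
                         BinaryOp op) {
      if (update.is_initial)
        update.val = input.val;                  // first contribution: copy
      else
        update.val = op(update.val, input.val);  // later ones: combine
      update.is_initial = false;
    }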
diff --git a/bundled/kokkos-3.7.00/containers/src/Kokkos_Bitset.hpp b/bundled/kokkos-3.7.00/containers/src/Kokkos_Bitset.hpp
new file mode 100644 (file)
index 0000000..05121b4
--- /dev/null
@@ -0,0 +1,456 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_BITSET_HPP
+#define KOKKOS_BITSET_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_BITSET
+#endif
+
+#include <Kokkos_Core.hpp>
+#include <Kokkos_Functional.hpp>
+
+#include <impl/Kokkos_Bitset_impl.hpp>
+
+namespace Kokkos {
+
+template <typename Device = Kokkos::DefaultExecutionSpace>
+class Bitset;
+
+template <typename Device = Kokkos::DefaultExecutionSpace>
+class ConstBitset;
+
+template <typename DstDevice, typename SrcDevice>
+void deep_copy(Bitset<DstDevice>& dst, Bitset<SrcDevice> const& src);
+
+template <typename DstDevice, typename SrcDevice>
+void deep_copy(Bitset<DstDevice>& dst, ConstBitset<SrcDevice> const& src);
+
+template <typename DstDevice, typename SrcDevice>
+void deep_copy(ConstBitset<DstDevice>& dst, ConstBitset<SrcDevice> const& src);
+
+/// A thread-safe view to a bitset
+template <typename Device>
+class Bitset {
+ public:
+  using execution_space = typename Device::execution_space;
+  using size_type       = unsigned int;
+
+  static constexpr unsigned BIT_SCAN_REVERSE   = 1u;
+  static constexpr unsigned MOVE_HINT_BACKWARD = 2u;
+
+  static constexpr unsigned BIT_SCAN_FORWARD_MOVE_HINT_FORWARD = 0u;
+  static constexpr unsigned BIT_SCAN_REVERSE_MOVE_HINT_FORWARD =
+      BIT_SCAN_REVERSE;
+  static constexpr unsigned BIT_SCAN_FORWARD_MOVE_HINT_BACKWARD =
+      MOVE_HINT_BACKWARD;
+  static constexpr unsigned BIT_SCAN_REVERSE_MOVE_HINT_BACKWARD =
+      BIT_SCAN_REVERSE | MOVE_HINT_BACKWARD;
+
+ private:
+  enum : unsigned {
+    block_size = static_cast<unsigned>(sizeof(unsigned) * CHAR_BIT)
+  };
+  enum : unsigned { block_mask = block_size - 1u };
+  enum : unsigned {
+    block_shift = Kokkos::Impl::integral_power_of_two(block_size)
+  };
+
+ public:
+  /// constructor
+  /// arg_size := number of bits in the set
+  Bitset(unsigned arg_size = 0u)
+      : m_size(arg_size),
+        m_last_block_mask(0u),
+        m_blocks("Bitset", ((m_size + block_mask) >> block_shift)) {
+    for (int i = 0, end = static_cast<int>(m_size & block_mask); i < end; ++i) {
+      m_last_block_mask |= 1u << i;
+    }
+  }
+
+  KOKKOS_DEFAULTED_FUNCTION
+  Bitset(const Bitset<Device>&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  Bitset& operator=(const Bitset<Device>&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  Bitset(Bitset<Device>&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  Bitset& operator=(Bitset<Device>&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  ~Bitset() = default;
+
+  /// number of bits in the set
+  /// can be called from the host or the device
+  KOKKOS_FORCEINLINE_FUNCTION
+  unsigned size() const { return m_size; }
+
+  /// number of bits which are set to 1
+  /// can only be called from the host
+  unsigned count() const {
+    Impl::BitsetCount<Bitset<Device> > f(*this);
+    return f.apply();
+  }
+
+  /// set all bits to 1
+  /// can only be called from the host
+  void set() {
+    Kokkos::deep_copy(m_blocks, ~0u);
+
+    if (m_last_block_mask) {
+      // clear the unused bits in the last block
+      Kokkos::Impl::DeepCopy<typename Device::memory_space, Kokkos::HostSpace>(
+          m_blocks.data() + (m_blocks.extent(0) - 1u), &m_last_block_mask,
+          sizeof(unsigned));
+      Kokkos::fence(
+          "Bitset::set: fence after clearing unused bits copying from "
+          "HostSpace");
+    }
+  }
+
+  /// set all bits to 0
+  /// can only be called from the host
+  void reset() { Kokkos::deep_copy(m_blocks, 0u); }
+
+  /// set all bits to 0
+  /// can only be called from the host
+  void clear() { Kokkos::deep_copy(m_blocks, 0u); }
+
+  /// set i'th bit to 1
+  /// can only be called from the device
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool set(unsigned i) const {
+    if (i < m_size) {
+      unsigned* block_ptr = &m_blocks[i >> block_shift];
+      const unsigned mask = 1u << static_cast<int>(i & block_mask);
+
+      return !(atomic_fetch_or(block_ptr, mask) & mask);
+    }
+    return false;
+  }
+
+  /// set i'th bit to 0
+  /// can only be called from the device
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool reset(unsigned i) const {
+    if (i < m_size) {
+      unsigned* block_ptr = &m_blocks[i >> block_shift];
+      const unsigned mask = 1u << static_cast<int>(i & block_mask);
+
+      return atomic_fetch_and(block_ptr, ~mask) & mask;
+    }
+    return false;
+  }
+
+  /// return true if the i'th bit is set to 1
+  /// can only be called from the device
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool test(unsigned i) const {
+    if (i < m_size) {
+#ifdef KOKKOS_ENABLE_SYCL
+      const unsigned block = Kokkos::atomic_load(&m_blocks[i >> block_shift]);
+#else
+      const unsigned block = volatile_load(&m_blocks[i >> block_shift]);
+#endif
+      const unsigned mask = 1u << static_cast<int>(i & block_mask);
+      return block & mask;
+    }
+    return false;
+  }
+
+  /// used with find_any_set_near or find_any_unset_near functions
+  /// returns the max number of times those functions should be called
+  /// when searching for an available bit
+  KOKKOS_FORCEINLINE_FUNCTION
+  unsigned max_hint() const { return m_blocks.extent(0); }
+
+  /// find a bit set to 1 near the hint
+  /// returns a pair<bool, unsigned>: if result.first is true,
+  /// result.second is the bit found; if result.first is false,
+  /// result.second is a new hint
+  KOKKOS_INLINE_FUNCTION
+  Kokkos::pair<bool, unsigned> find_any_set_near(
+      unsigned hint,
+      unsigned scan_direction = BIT_SCAN_FORWARD_MOVE_HINT_FORWARD) const {
+    const unsigned block_idx =
+        (hint >> block_shift) < m_blocks.extent(0) ? (hint >> block_shift) : 0;
+    const unsigned offset = hint & block_mask;
+#ifdef KOKKOS_ENABLE_SYCL
+    unsigned block = Kokkos::atomic_load(&m_blocks[block_idx]);
+#else
+    unsigned block = volatile_load(&m_blocks[block_idx]);
+#endif
+    block = !m_last_block_mask || (block_idx < (m_blocks.extent(0) - 1))
+                ? block
+                : block & m_last_block_mask;
+
+    return find_any_helper(block_idx, offset, block, scan_direction);
+  }
+
+  /// find a bit set to 0 near the hint
+  /// returns a pair<bool, unsigned>: if result.first is true,
+  /// result.second is the bit found; if result.first is false,
+  /// result.second is a new hint
+  KOKKOS_INLINE_FUNCTION
+  Kokkos::pair<bool, unsigned> find_any_unset_near(
+      unsigned hint,
+      unsigned scan_direction = BIT_SCAN_FORWARD_MOVE_HINT_FORWARD) const {
+    const unsigned block_idx = hint >> block_shift;
+    const unsigned offset    = hint & block_mask;
+#ifdef KOKKOS_ENABLE_SYCL
+    unsigned block = Kokkos::atomic_load(&m_blocks[block_idx]);
+#else
+    unsigned block = volatile_load(&m_blocks[block_idx]);
+#endif
+    block = !m_last_block_mask || (block_idx < (m_blocks.extent(0) - 1))
+                ? ~block
+                : ~block & m_last_block_mask;
+
+    return find_any_helper(block_idx, offset, block, scan_direction);
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
+    return m_blocks.is_allocated();
+  }
+
+ private:
+  KOKKOS_FORCEINLINE_FUNCTION
+  Kokkos::pair<bool, unsigned> find_any_helper(unsigned block_idx,
+                                               unsigned offset, unsigned block,
+                                               unsigned scan_direction) const {
+    Kokkos::pair<bool, unsigned> result(block > 0u, 0);
+
+    if (!result.first) {
+      result.second = update_hint(block_idx, offset, scan_direction);
+    } else {
+      result.second =
+          scan_block((block_idx << block_shift), offset, block, scan_direction);
+    }
+    return result;
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  unsigned scan_block(unsigned block_start, int offset, unsigned block,
+                      unsigned scan_direction) const {
+    offset = !(scan_direction & BIT_SCAN_REVERSE)
+                 ? offset
+                 : (offset + block_mask) & block_mask;
+    block = Impl::rotate_right(block, offset);
+    return (((!(scan_direction & BIT_SCAN_REVERSE)
+                  ? Impl::bit_scan_forward(block)
+                  : Impl::int_log2(block)) +
+             offset) &
+            block_mask) +
+           block_start;
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  unsigned update_hint(long long block_idx, unsigned offset,
+                       unsigned scan_direction) const {
+    block_idx += scan_direction & MOVE_HINT_BACKWARD ? -1 : 1;
+    block_idx = block_idx >= 0 ? block_idx : m_blocks.extent(0) - 1;
+    block_idx =
+        block_idx < static_cast<long long>(m_blocks.extent(0)) ? block_idx : 0;
+
+    return static_cast<unsigned>(block_idx) * block_size + offset;
+  }
+
+ private:
+  unsigned m_size;
+  unsigned m_last_block_mask;
+  View<unsigned*, Device, MemoryTraits<RandomAccess> > m_blocks;
+
+ private:
+  template <typename DDevice>
+  friend class Bitset;
+
+  template <typename DDevice>
+  friend class ConstBitset;
+
+  template <typename Bitset>
+  friend struct Impl::BitsetCount;
+
+  template <typename DstDevice, typename SrcDevice>
+  friend void deep_copy(Bitset<DstDevice>& dst, Bitset<SrcDevice> const& src);
+
+  template <typename DstDevice, typename SrcDevice>
+  friend void deep_copy(Bitset<DstDevice>& dst,
+                        ConstBitset<SrcDevice> const& src);
+};
+
+/// a thread-safe view to a const bitset,
+/// i.e., bits can only be tested, not modified
+template <typename Device>
+class ConstBitset {
+ public:
+  using execution_space = typename Device::execution_space;
+  using size_type       = unsigned int;
+
+ private:
+  enum { block_size = static_cast<unsigned>(sizeof(unsigned) * CHAR_BIT) };
+  enum { block_mask = block_size - 1u };
+  enum { block_shift = Kokkos::Impl::integral_power_of_two(block_size) };
+
+ public:
+  KOKKOS_FUNCTION
+  ConstBitset() : m_size(0) {}
+
+  KOKKOS_FUNCTION
+  ConstBitset(Bitset<Device> const& rhs)
+      : m_size(rhs.m_size), m_blocks(rhs.m_blocks) {}
+
+  KOKKOS_FUNCTION
+  ConstBitset(ConstBitset<Device> const& rhs)
+      : m_size(rhs.m_size), m_blocks(rhs.m_blocks) {}
+
+  KOKKOS_FUNCTION
+  ConstBitset<Device>& operator=(Bitset<Device> const& rhs) {
+    this->m_size   = rhs.m_size;
+    this->m_blocks = rhs.m_blocks;
+
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  ConstBitset<Device>& operator=(ConstBitset<Device> const& rhs) {
+    this->m_size   = rhs.m_size;
+    this->m_blocks = rhs.m_blocks;
+
+    return *this;
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  unsigned size() const { return m_size; }
+
+  unsigned count() const {
+    Impl::BitsetCount<ConstBitset<Device> > f(*this);
+    return f.apply();
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool test(unsigned i) const {
+    if (i < m_size) {
+      const unsigned block = m_blocks[i >> block_shift];
+      const unsigned mask  = 1u << static_cast<int>(i & block_mask);
+      return block & mask;
+    }
+    return false;
+  }
+
+ private:
+  unsigned m_size;
+  View<const unsigned*, Device, MemoryTraits<RandomAccess> > m_blocks;
+
+ private:
+  template <typename DDevice>
+  friend class ConstBitset;
+
+  template <typename Bitset>
+  friend struct Impl::BitsetCount;
+
+  template <typename DstDevice, typename SrcDevice>
+  friend void deep_copy(Bitset<DstDevice>& dst,
+                        ConstBitset<SrcDevice> const& src);
+
+  template <typename DstDevice, typename SrcDevice>
+  friend void deep_copy(ConstBitset<DstDevice>& dst,
+                        ConstBitset<SrcDevice> const& src);
+};
+
+template <typename DstDevice, typename SrcDevice>
+void deep_copy(Bitset<DstDevice>& dst, Bitset<SrcDevice> const& src) {
+  if (dst.size() != src.size()) {
+    Kokkos::Impl::throw_runtime_exception(
+        "Error: Cannot deep_copy bitsets of different sizes!");
+  }
+
+  Kokkos::fence("Bitset::deep_copy: fence before copy operation");
+  Kokkos::Impl::DeepCopy<typename DstDevice::memory_space,
+                         typename SrcDevice::memory_space>(
+      dst.m_blocks.data(), src.m_blocks.data(),
+      sizeof(unsigned) * src.m_blocks.extent(0));
+  Kokkos::fence("Bitset::deep_copy: fence after copy operation");
+}
+
+template <typename DstDevice, typename SrcDevice>
+void deep_copy(Bitset<DstDevice>& dst, ConstBitset<SrcDevice> const& src) {
+  if (dst.size() != src.size()) {
+    Kokkos::Impl::throw_runtime_exception(
+        "Error: Cannot deep_copy bitsets of different sizes!");
+  }
+
+  Kokkos::fence("Bitset::deep_copy: fence before copy operation");
+  Kokkos::Impl::DeepCopy<typename DstDevice::memory_space,
+                         typename SrcDevice::memory_space>(
+      dst.m_blocks.data(), src.m_blocks.data(),
+      sizeof(unsigned) * src.m_blocks.extent(0));
+  Kokkos::fence("Bitset::deep_copy: fence after copy operation");
+}
+
+template <typename DstDevice, typename SrcDevice>
+void deep_copy(ConstBitset<DstDevice>& dst, ConstBitset<SrcDevice> const& src) {
+  if (dst.size() != src.size()) {
+    Kokkos::Impl::throw_runtime_exception(
+        "Error: Cannot deep_copy bitsets of different sizes!");
+  }
+
+  Kokkos::fence("Bitset::deep_copy: fence before copy operation");
+  Kokkos::Impl::DeepCopy<typename DstDevice::memory_space,
+                         typename SrcDevice::memory_space>(
+      dst.m_blocks.data(), src.m_blocks.data(),
+      sizeof(unsigned) * src.m_blocks.extent(0));
+  Kokkos::fence("Bitset::deep_copy: fence after copy operation");
+}
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_BITSET
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_BITSET
+#endif
+#endif  // KOKKOS_BITSET_HPP
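
A minimal host/device usage sketch (not part of the patch) for the Bitset API
above; the label, sizes, and lambda are illustrative:

    #include <Kokkos_Core.hpp>
    #include <Kokkos_Bitset.hpp>

    void bitset_example() {
      Kokkos::Bitset<Kokkos::DefaultExecutionSpace> marks(1024);
      marks.reset();  // host-only: set all bits to 0

      // per-bit set()/reset()/test() are device functions; set() and
      // reset() are atomic, so concurrent threads may flip bits safely
      Kokkos::parallel_for(
          "mark_even", 1024, KOKKOS_LAMBDA(const int i) {
            if (i % 2 == 0) marks.set(i);
            // hint-based lookup near i is also device-callable, e.g.
            // Kokkos::pair<bool, unsigned> r = marks.find_any_set_near(i);
          });

      const unsigned n = marks.count();  // host-only reduction; here 512
      (void)n;
    }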
diff --git a/bundled/kokkos-3.7.00/containers/src/Kokkos_DualView.hpp b/bundled/kokkos-3.7.00/containers/src/Kokkos_DualView.hpp
new file mode 100644 (file)
index 0000000..916c54d
--- /dev/null
@@ -0,0 +1,1272 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/// \file Kokkos_DualView.hpp
+/// \brief Declaration and definition of Kokkos::DualView.
+///
+/// This header file declares and defines Kokkos::DualView and its
+/// related nonmember functions.
+
+#ifndef KOKKOS_DUALVIEW_HPP
+#define KOKKOS_DUALVIEW_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DUALVIEW
+#endif
+
+#include <Kokkos_Core.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+namespace Kokkos {
+
+/* \class DualView
+ * \brief Container to manage mirroring a Kokkos::View that lives
+ *   in device memory with a Kokkos::View that lives in host memory.
+ *
+ * This class provides capabilities to manage data which exists in two
+ * memory spaces at the same time.  It keeps views of the same layout
+ * on two memory spaces as well as modified flags for both
+ * allocations.  Users are responsible for setting the modified flags
+ * manually if they change the data in either memory space, by calling
+ * the modify() method templated on the device where they modified the
+ * data.  Users may synchronize data by calling the sync() method,
+ * templated on the device towards which they want to synchronize
+ * (i.e., the target of the one-way copy operation).
+ *
+ * The DualView class also provides convenience methods such as
+ * realloc, resize and capacity which call the appropriate methods of
+ * the underlying Kokkos::View objects.
+ *
+ * The four template arguments are the same as those of Kokkos::View.
+ * (Please refer to that class' documentation for a detailed
+ * description.)
+ *
+ *   \tparam DataType The type of the entries stored in the container.
+ *
+ *   \tparam Layout The array's layout in memory.
+ *
+ *   \tparam Device The Kokkos Device type.  If its memory space is
+ *     not the same as the host's memory space, then DualView will
+ *     contain two separate Views: one in device memory, and one in
+ *     host memory.  Otherwise, DualView will only store one View.
+ *
+ *   \tparam MemoryTraits (optional) The user's intended memory access
+ *     behavior.  Please see the documentation of Kokkos::View for
+ *     examples.  The default suffices for most users.
+ */
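+
+/* A minimal modify/sync cycle as an illustrative sketch (not from the
+ * upstream header; it assumes the modify_host()/sync_device() convenience
+ * methods and the public h_view member declared below):
+ *
+ * \code
+ *   Kokkos::DualView<double*> dv("dv", 100);
+ *   dv.h_view(0) = 3.14;   // change data in the host allocation
+ *   dv.modify_host();      // mark the host side as modified
+ *   dv.sync_device();      // one-way copy: host -> device
+ * \endcode
+ */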
+
+namespace Impl {
+
+#ifdef KOKKOS_ENABLE_CUDA
+
+inline const Kokkos::Cuda& get_cuda_space(const Kokkos::Cuda& in) { return in; }
+
+inline const Kokkos::Cuda& get_cuda_space() {
+  return *Kokkos::Impl::cuda_get_deep_copy_space();
+}
+
+template <typename NonCudaExecSpace>
+inline const Kokkos::Cuda& get_cuda_space(const NonCudaExecSpace&) {
+  return get_cuda_space();
+}
+
+#endif  // KOKKOS_ENABLE_CUDA
+
+}  // namespace Impl
+template <class DataType, class Arg1Type = void, class Arg2Type = void,
+          class Arg3Type = void>
+class DualView : public ViewTraits<DataType, Arg1Type, Arg2Type, Arg3Type> {
+  template <class, class, class, class>
+  friend class DualView;
+
+ public:
+  //! \name Typedefs for device types and various Kokkos::View specializations.
+  //@{
+  using traits = ViewTraits<DataType, Arg1Type, Arg2Type, Arg3Type>;
+
+  //! The Kokkos host mirror space type.
+  using host_mirror_space = typename traits::host_mirror_space;
+
+  //! The type of a Kokkos::View on the device.
+  using t_dev = View<typename traits::data_type, Arg1Type, Arg2Type, Arg3Type>;
+
+  /// \typedef t_host
+  /// \brief The type of a Kokkos::View host mirror of \c t_dev.
+  using t_host = typename t_dev::HostMirror;
+
+  //! The type of a const Kokkos::View on the device.
+  using t_dev_const =
+      View<typename traits::const_data_type, Arg1Type, Arg2Type, Arg3Type>;
+
+  /// \typedef t_host_const
+  /// \brief The type of a const View host mirror of \c t_dev_const.
+  using t_host_const = typename t_dev_const::HostMirror;
+
+  //! The type of a const, random-access View on the device.
+  using t_dev_const_randomread =
+      View<typename traits::const_data_type, typename traits::array_layout,
+           typename traits::device_type,
+           Kokkos::MemoryTraits<Kokkos::RandomAccess>>;
+
+  /// \typedef t_host_const_randomread
+  /// \brief The type of a const, random-access View host mirror of
+  ///   \c t_dev_const_randomread.
+  using t_host_const_randomread = typename t_dev_const_randomread::HostMirror;
+
+  //! The type of an unmanaged View on the device.
+  using t_dev_um =
+      View<typename traits::data_type, typename traits::array_layout,
+           typename traits::device_type, MemoryUnmanaged>;
+
+  //! The type of an unmanaged View host mirror of \c t_dev_um.
+  using t_host_um =
+      View<typename t_host::data_type, typename t_host::array_layout,
+           typename t_host::device_type, MemoryUnmanaged>;
+
+  //! The type of a const unmanaged View on the device.
+  using t_dev_const_um =
+      View<typename traits::const_data_type, typename traits::array_layout,
+           typename traits::device_type, MemoryUnmanaged>;
+
+  //! The type of a const unmanaged View host mirror of \c t_dev_const_um.
+  using t_host_const_um =
+      View<typename t_host::const_data_type, typename t_host::array_layout,
+           typename t_host::device_type, MemoryUnmanaged>;
+
+  //! The type of a const, random-access, unmanaged View on the device.
+  using t_dev_const_randomread_um =
+      View<typename t_host::const_data_type, typename t_host::array_layout,
+           typename t_host::device_type,
+           Kokkos::MemoryTraits<Kokkos::Unmanaged | Kokkos::RandomAccess>>;
+
+  /// \typedef t_host_const_randomread_um
+  /// \brief The type of a const, random-access, unmanaged View host mirror
+  ///   of \c t_dev_const_randomread_um.
+  using t_host_const_randomread_um =
+      typename t_dev_const_randomread_um::HostMirror;
+
+  //@}
+  //! \name Counters to keep track of changes ("modified" flags)
+  //@{
+
+ protected:
+  // modified_flags[0] -> host
+  // modified_flags[1] -> device
+  using t_modified_flags = View<unsigned int[2], LayoutLeft, Kokkos::HostSpace>;
+  t_modified_flags modified_flags;
+
+ public:
+  //@}
+
+  // Moved this specifically after modified_flags to resolve an alignment issue
+  // on MSVC/NVCC
+  //! \name The two View instances.
+  //@{
+  t_dev d_view;
+  t_host h_view;
+  //@}
+
+  //! \name Constructors
+  //@{
+
+  /// \brief Empty constructor.
+  ///
+  /// Both device and host View objects are constructed using their
+  /// default constructors.  The "modified" flags are both initialized
+  /// to "unmodified."
+  DualView() = default;
+
+  /// \brief Constructor that allocates View objects on both host and device.
+  ///
+  /// This constructor works like the analogous constructor of View.
+  /// The first argument is a string label, which is entirely for your
+  /// benefit.  (Different DualView objects may have the same label if
+  /// you like.)  The arguments that follow are the dimensions of the
+  /// View objects.  For example, if the View has three dimensions,
+  /// the first three integer arguments will be nonzero, and you may
+  /// omit the integer arguments that follow.
+  DualView(const std::string& label,
+           const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
+      : modified_flags(
+            Kokkos::view_alloc(typename t_modified_flags::execution_space{},
+                               "DualView::modified_flags")),
+        d_view(label, n0, n1, n2, n3, n4, n5, n6, n7),
+        h_view(create_mirror_view(d_view))  // without UVM, host View mirrors
+  {}
+
+  /// \brief Constructor that allocates View objects on both host and device.
+  ///
+  /// This constructor works like the analogous constructor of View.
+  /// The first argument is a view-constructor-properties (ViewCtorProp)
+  /// object; it allows specifying a label, requesting no initialization,
+  /// and all of the other properties such an object can carry.
+  /// The arguments that follow are the dimensions of the
+  /// View objects.  For example, if the View has three dimensions,
+  /// the first three integer arguments will be nonzero, and you may
+  /// omit the integer arguments that follow.
+  template <class... P>
+  DualView(const Impl::ViewCtorProp<P...>& arg_prop,
+           std::enable_if_t<!Impl::ViewCtorProp<P...>::has_pointer,
+                            size_t> const n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n1                   = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n2                   = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n3                   = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n4                   = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n5                   = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n6                   = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+           const size_t n7                   = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
+      : modified_flags(t_modified_flags("DualView::modified_flags")),
+        d_view(arg_prop, n0, n1, n2, n3, n4, n5, n6, n7) {
+    // without UVM, host View mirrors
+    if (Kokkos::Impl::has_type<Impl::WithoutInitializing_t, P...>::value)
+      h_view = Kokkos::create_mirror_view(Kokkos::WithoutInitializing, d_view);
+    else
+      h_view = Kokkos::create_mirror_view(d_view);
+  }
+
+  //! Copy constructor (shallow copy)
+  template <class SS, class LS, class DS, class MS>
+  DualView(const DualView<SS, LS, DS, MS>& src)
+      : modified_flags(src.modified_flags),
+        d_view(src.d_view),
+        h_view(src.h_view) {}
+
+  //! Subview constructor
+  template <class SD, class S1, class S2, class S3, class Arg0, class... Args>
+  DualView(const DualView<SD, S1, S2, S3>& src, const Arg0& arg0, Args... args)
+      : modified_flags(src.modified_flags),
+        d_view(Kokkos::subview(src.d_view, arg0, args...)),
+        h_view(Kokkos::subview(src.h_view, arg0, args...)) {}
+
+  /// \brief Create DualView from existing device and host View objects.
+  ///
+  /// This constructor assumes that the device and host View objects
+  /// are synchronized.  You, the caller, are responsible for making
+  /// sure this is the case before calling this constructor.  After
+  /// this constructor returns, you may use DualView's sync() and
+  /// modify() methods to ensure synchronization of the View objects.
+  ///
+  /// \param d_view_ Device View
+  /// \param h_view_ Host View (must have type t_host = t_dev::HostMirror)
+  DualView(const t_dev& d_view_, const t_host& h_view_)
+      : modified_flags(t_modified_flags("DualView::modified_flags")),
+        d_view(d_view_),
+        h_view(h_view_) {
+    if (int(d_view.rank) != int(h_view.rank) ||
+        d_view.extent(0) != h_view.extent(0) ||
+        d_view.extent(1) != h_view.extent(1) ||
+        d_view.extent(2) != h_view.extent(2) ||
+        d_view.extent(3) != h_view.extent(3) ||
+        d_view.extent(4) != h_view.extent(4) ||
+        d_view.extent(5) != h_view.extent(5) ||
+        d_view.extent(6) != h_view.extent(6) ||
+        d_view.extent(7) != h_view.extent(7) ||
+        d_view.stride_0() != h_view.stride_0() ||
+        d_view.stride_1() != h_view.stride_1() ||
+        d_view.stride_2() != h_view.stride_2() ||
+        d_view.stride_3() != h_view.stride_3() ||
+        d_view.stride_4() != h_view.stride_4() ||
+        d_view.stride_5() != h_view.stride_5() ||
+        d_view.stride_6() != h_view.stride_6() ||
+        d_view.stride_7() != h_view.stride_7() ||
+        d_view.span() != h_view.span()) {
+      Kokkos::Impl::throw_runtime_exception(
+          "DualView constructed with incompatible views");
+    }
+  }
+  // does the DualView have only one device?
+  struct impl_dualview_is_single_device {
+    enum : bool {
+      value = std::is_same<typename t_dev::device_type,
+                           typename t_host::device_type>::value
+    };
+  };
+
+  // does the given device match the device of t_dev?
+  template <typename Device>
+  struct impl_device_matches_tdev_device {
+    enum : bool {
+      value = std::is_same<typename t_dev::device_type, Device>::value
+    };
+  };
+  // does the given device match the device of t_host?
+  template <typename Device>
+  struct impl_device_matches_thost_device {
+    enum : bool {
+      value = std::is_same<typename t_host::device_type, Device>::value
+    };
+  };
+
+  // does the given device match the execution space of t_host?
+  template <typename Device>
+  struct impl_device_matches_thost_exec {
+    enum : bool {
+      value = std::is_same<typename t_host::execution_space, Device>::value
+    };
+  };
+
+  // does the given device match the execution space of t_dev?
+  template <typename Device>
+  struct impl_device_matches_tdev_exec {
+    enum : bool {
+      value = std::is_same<typename t_dev::execution_space, Device>::value
+    };
+  };
+
+  // does the given device's memory space match the memory space of t_dev?
+  template <typename Device>
+  struct impl_device_matches_tdev_memory_space {
+    enum : bool {
+      value = std::is_same<typename t_dev::memory_space,
+                           typename Device::memory_space>::value
+    };
+  };
+
+  //@}
+  //! \name Methods for synchronizing, marking as modified, and getting Views.
+  //@{
+
+  /// \brief Return a View on a specific device \c Device.
+  ///
+  /// Please don't be afraid of the nested std::conditional_t expressions in
+  /// the return value's type.  They just tell the method what the return
+  /// type should be: t_dev if the \c Device template parameter matches
+  /// this DualView's device type, else t_host.
+  ///
+  /// For example, suppose you create a DualView on Cuda, like this:
+  /// \code
+  ///   using dual_view_type =
+  ///       Kokkos::DualView<float, Kokkos::LayoutRight, Kokkos::Cuda>;
+  ///   dual_view_type DV ("my dual view", 100);
+  /// \endcode
+  /// If you want to get the CUDA device View, do this:
+  /// \code
+  ///   typename dual_view_type::t_dev cudaView = DV.view<Kokkos::Cuda> ();
+  /// \endcode
+  /// and if you want to get the host mirror of that View, do this:
+  /// \code
+  ///   using host_device_type = typename Kokkos::HostSpace::execution_space;
+  ///   typename dual_view_type::t_host hostView = DV.view<host_device_type> ();
+  /// \endcode
+  template <class Device>
+  KOKKOS_INLINE_FUNCTION const typename std::conditional_t<
+      impl_device_matches_tdev_device<Device>::value, t_dev,
+      typename std::conditional_t<
+          impl_device_matches_thost_device<Device>::value, t_host,
+          typename std::conditional_t<
+              impl_device_matches_thost_exec<Device>::value, t_host,
+              typename std::conditional_t<
+                  impl_device_matches_tdev_exec<Device>::value, t_dev,
+                  typename std::conditional_t<
+                      impl_device_matches_tdev_memory_space<Device>::value,
+                      t_dev, t_host>>>>>
+  view() const {
+    constexpr bool device_is_memspace =
+        std::is_same<Device, typename Device::memory_space>::value;
+    constexpr bool device_is_execspace =
+        std::is_same<Device, typename Device::execution_space>::value;
+    constexpr bool device_exec_is_t_dev_exec =
+        std::is_same<typename Device::execution_space,
+                     typename t_dev::execution_space>::value;
+    constexpr bool device_mem_is_t_dev_mem =
+        std::is_same<typename Device::memory_space,
+                     typename t_dev::memory_space>::value;
+    constexpr bool device_exec_is_t_host_exec =
+        std::is_same<typename Device::execution_space,
+                     typename t_host::execution_space>::value;
+    constexpr bool device_mem_is_t_host_mem =
+        std::is_same<typename Device::memory_space,
+                     typename t_host::memory_space>::value;
+    // true iff Device is exactly one of the DualView's device types
+    constexpr bool device_is_t_host_device =
+        std::is_same<Device, typename t_host::device_type>::value;
+    constexpr bool device_is_t_dev_device =
+        std::is_same<Device, typename t_dev::device_type>::value;
+
+    static_assert(
+        device_is_t_dev_device || device_is_t_host_device ||
+            (device_is_memspace &&
+             (device_mem_is_t_dev_mem || device_mem_is_t_host_mem)) ||
+            (device_is_execspace &&
+             (device_exec_is_t_dev_exec || device_exec_is_t_host_exec)) ||
+            ((!device_is_execspace && !device_is_memspace) &&
+             ((device_mem_is_t_dev_mem || device_mem_is_t_host_mem) ||
+              (device_exec_is_t_dev_exec || device_exec_is_t_host_exec))),
+        "Template parameter to .view() must exactly match one of the "
+        "DualView's device types or one of the execution or memory spaces");
+
+    return Impl::if_c<std::is_same<typename t_dev::memory_space,
+                                   typename Device::memory_space>::value,
+                      t_dev, t_host>::select(d_view, h_view);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  t_host view_host() const { return h_view; }
+
+  KOKKOS_INLINE_FUNCTION
+  t_dev view_device() const { return d_view; }
+
+  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
+    return (d_view.is_allocated() && h_view.is_allocated());
+  }
+
+  template <class Device>
+  static int get_device_side() {
+    constexpr bool device_is_memspace =
+        std::is_same<Device, typename Device::memory_space>::value;
+    constexpr bool device_is_execspace =
+        std::is_same<Device, typename Device::execution_space>::value;
+    constexpr bool device_exec_is_t_dev_exec =
+        std::is_same<typename Device::execution_space,
+                     typename t_dev::execution_space>::value;
+    constexpr bool device_mem_is_t_dev_mem =
+        std::is_same<typename Device::memory_space,
+                     typename t_dev::memory_space>::value;
+    constexpr bool device_exec_is_t_host_exec =
+        std::is_same<typename Device::execution_space,
+                     typename t_host::execution_space>::value;
+    constexpr bool device_mem_is_t_host_mem =
+        std::is_same<typename Device::memory_space,
+                     typename t_host::memory_space>::value;
+    // true iff Device is exactly one of the DualView's device types
+    constexpr bool device_is_t_host_device =
+        std::is_same<Device, typename t_host::device_type>::value;
+    constexpr bool device_is_t_dev_device =
+        std::is_same<Device, typename t_dev::device_type>::value;
+
+    static_assert(
+        device_is_t_dev_device || device_is_t_host_device ||
+            (device_is_memspace &&
+             (device_mem_is_t_dev_mem || device_mem_is_t_host_mem)) ||
+            (device_is_execspace &&
+             (device_exec_is_t_dev_exec || device_exec_is_t_host_exec)) ||
+            ((!device_is_execspace && !device_is_memspace) &&
+             ((device_mem_is_t_dev_mem || device_mem_is_t_host_mem) ||
+              (device_exec_is_t_dev_exec || device_exec_is_t_host_exec))),
+        "Template parameter to .sync() must exactly match one of the "
+        "DualView's device types or one of the execution or memory spaces");
+
+    int dev = -1;
+    if (device_is_t_dev_device)
+      dev = 1;
+    else if (device_is_t_host_device)
+      dev = 0;
+    else {
+      if (device_is_memspace) {
+        if (device_mem_is_t_dev_mem) dev = 1;
+        if (device_mem_is_t_host_mem) dev = 0;
+        if (device_mem_is_t_host_mem && device_mem_is_t_dev_mem) dev = -1;
+      }
+      if (device_is_execspace) {
+        if (device_exec_is_t_dev_exec) dev = 1;
+        if (device_exec_is_t_host_exec) dev = 0;
+        if (device_exec_is_t_host_exec && device_exec_is_t_dev_exec) dev = -1;
+      }
+      if (!device_is_execspace && !device_is_memspace) {
+        if (device_mem_is_t_dev_mem) dev = 1;
+        if (device_mem_is_t_host_mem) dev = 0;
+        if (device_mem_is_t_host_mem && device_mem_is_t_dev_mem) dev = -1;
+        if (device_exec_is_t_dev_exec) dev = 1;
+        if (device_exec_is_t_host_exec) dev = 0;
+        if (device_exec_is_t_host_exec && device_exec_is_t_dev_exec) dev = -1;
+      }
+    }
+    return dev;
+  }
+  static constexpr int view_header_size = 128;
+  void impl_report_host_sync() const noexcept {
+    if (Kokkos::Tools::Experimental::get_callbacks().sync_dual_view !=
+        nullptr) {
+      Kokkos::Tools::syncDualView(
+          h_view.label(),
+          reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(h_view.data()) -
+                                  view_header_size),
+          false);
+    }
+  }
+  void impl_report_device_sync() const noexcept {
+    if (Kokkos::Tools::Experimental::get_callbacks().sync_dual_view !=
+        nullptr) {
+      Kokkos::Tools::syncDualView(
+          d_view.label(),
+          reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(d_view.data()) -
+                                  view_header_size),
+          true);
+    }
+  }
+
+  /// \brief Update data on device or host only if data in the other
+  ///   space has been marked as modified.
+  ///
+  /// If \c Device is the same as this DualView's device type, then
+  /// copy data from host to device.  Otherwise, copy data from device
+  /// to host.  In either case, only copy if the source of the copy
+  /// has been modified.
+  ///
+  /// This is a one-way synchronization only.  If the target of the
+  /// copy has been modified, this operation will discard those
+  /// modifications.  It will also reset both device and host modified
+  /// flags.
+  ///
+  /// \note This method doesn't know on its own whether you modified
+  ///   the data in either View.  You must manually mark modified data
+  ///   as modified, by calling the modify() method with the
+  ///   appropriate template parameter.
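+  ///
+  /// A typical round trip, sketched for illustration:
+  /// \code
+  ///   dv.modify_device();   // mark device data as modified
+  ///   // ... write through dv.view_device() in a parallel_for ...
+  ///   dv.sync_host();       // copy device data to the host
+  /// \endcode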
+  // deliberately passing args by cref as they're used multiple times
+  template <class Device, class... Args>
+  void sync_impl(std::true_type, Args const&... args) {
+    if (modified_flags.data() == nullptr) return;
+
+    int dev = get_device_side<Device>();
+
+    if (dev == 1) {  // if Device is the same as DualView's device type
+      if ((modified_flags(0) > 0) && (modified_flags(0) >= modified_flags(1))) {
+#ifdef KOKKOS_ENABLE_CUDA
+        if (std::is_same<typename t_dev::memory_space,
+                         Kokkos::CudaUVMSpace>::value) {
+          if (d_view.data() == h_view.data())
+            Kokkos::Impl::cuda_prefetch_pointer(
+                Impl::get_cuda_space(args...), d_view.data(),
+                sizeof(typename t_dev::value_type) * d_view.span(), true);
+        }
+#endif
+
+        deep_copy(args..., d_view, h_view);
+        modified_flags(0) = modified_flags(1) = 0;
+        impl_report_device_sync();
+      }
+    }
+    if (dev == 0) {  // if Device is the same as DualView's host type
+      if ((modified_flags(1) > 0) && (modified_flags(1) >= modified_flags(0))) {
+#ifdef KOKKOS_ENABLE_CUDA
+        if (std::is_same<typename t_dev::memory_space,
+                         Kokkos::CudaUVMSpace>::value) {
+          if (d_view.data() == h_view.data())
+            Kokkos::Impl::cuda_prefetch_pointer(
+                Impl::get_cuda_space(args...), d_view.data(),
+                sizeof(typename t_dev::value_type) * d_view.span(), false);
+        }
+#endif
+
+        deep_copy(args..., h_view, d_view);
+        modified_flags(0) = modified_flags(1) = 0;
+        impl_report_host_sync();
+      }
+    }
+    if (std::is_same<typename t_host::memory_space,
+                     typename t_dev::memory_space>::value) {
+      typename t_dev::execution_space().fence(
+          "Kokkos::DualView<>::sync: fence after syncing DualView");
+      typename t_host::execution_space().fence(
+          "Kokkos::DualView<>::sync: fence after syncing DualView");
+    }
+  }
+
+  template <class Device>
+  void sync(const std::enable_if_t<
+                (std::is_same<typename traits::data_type,
+                              typename traits::non_const_data_type>::value) ||
+                    (std::is_same<Device, int>::value),
+                int>& = 0) {
+    sync_impl<Device>(std::true_type{});
+  }
+
+  template <class Device, class ExecutionSpace>
+  void sync(const ExecutionSpace& exec,
+            const std::enable_if_t<
+                (std::is_same<typename traits::data_type,
+                              typename traits::non_const_data_type>::value) ||
+                    (std::is_same<Device, int>::value),
+                int>& = 0) {
+    sync_impl<Device>(std::true_type{}, exec);
+  }
+
+  // deliberately passing args by cref as they're used multiple times
+  template <class Device, class... Args>
+  void sync_impl(std::false_type, Args const&...) {
+    if (modified_flags.data() == nullptr) return;
+
+    int dev = get_device_side<Device>();
+
+    if (dev == 1) {  // if Device is the same as DualView's device type
+      if ((modified_flags(0) > 0) && (modified_flags(0) >= modified_flags(1))) {
+        Impl::throw_runtime_exception(
+            "Calling sync on a DualView with a const datatype.");
+      }
+      impl_report_device_sync();
+    }
+    if (dev == 0) {  // if Device is the same as DualView's host type
+      if ((modified_flags(1) > 0) && (modified_flags(1) >= modified_flags(0))) {
+        Impl::throw_runtime_exception(
+            "Calling sync on a DualView with a const datatype.");
+      }
+      impl_report_host_sync();
+    }
+  }
+
+  template <class Device>
+  void sync(const std::enable_if_t<
+                (!std::is_same<typename traits::data_type,
+                               typename traits::non_const_data_type>::value) ||
+                    (std::is_same<Device, int>::value),
+                int>& = 0) {
+    sync_impl<Device>(std::false_type{});
+  }
+  template <class Device, class ExecutionSpace>
+  void sync(const ExecutionSpace& exec,
+            const std::enable_if_t<
+                (!std::is_same<typename traits::data_type,
+                               typename traits::non_const_data_type>::value) ||
+                    (std::is_same<Device, int>::value),
+                int>& = 0) {
+    sync_impl<Device>(std::false_type{}, exec);
+  }
+
+  // deliberately passing args by cref as they're used multiple times
+  template <typename... Args>
+  void sync_host_impl(Args const&... args) {
+    if (!std::is_same<typename traits::data_type,
+                      typename traits::non_const_data_type>::value)
+      Impl::throw_runtime_exception(
+          "Calling sync_host on a DualView with a const datatype.");
+    if (modified_flags.data() == nullptr) return;
+    if (modified_flags(1) > modified_flags(0)) {
+#ifdef KOKKOS_ENABLE_CUDA
+      if (std::is_same<typename t_dev::memory_space,
+                       Kokkos::CudaUVMSpace>::value) {
+        if (d_view.data() == h_view.data())
+          Kokkos::Impl::cuda_prefetch_pointer(
+              Impl::get_cuda_space(args...), d_view.data(),
+              sizeof(typename t_dev::value_type) * d_view.span(), false);
+      }
+#endif
+
+      deep_copy(args..., h_view, d_view);
+      modified_flags(1) = modified_flags(0) = 0;
+      impl_report_host_sync();
+    }
+  }
+
+  template <class ExecSpace>
+  void sync_host(const ExecSpace& exec) {
+    sync_host_impl(exec);
+  }
+  void sync_host() { sync_host_impl(); }
+
+  // deliberately passing args by cref as they're used multiple times
+  template <typename... Args>
+  void sync_device_impl(Args const&... args) {
+    if (!std::is_same<typename traits::data_type,
+                      typename traits::non_const_data_type>::value)
+      Impl::throw_runtime_exception(
+          "Calling sync_device on a DualView with a const datatype.");
+    if (modified_flags.data() == nullptr) return;
+    if (modified_flags(0) > modified_flags(1)) {
+#ifdef KOKKOS_ENABLE_CUDA
+      if (std::is_same<typename t_dev::memory_space,
+                       Kokkos::CudaUVMSpace>::value) {
+        if (d_view.data() == h_view.data())
+          Kokkos::Impl::cuda_prefetch_pointer(
+              Impl::get_cuda_space(args...), d_view.data(),
+              sizeof(typename t_dev::value_type) * d_view.span(), true);
+      }
+#endif
+
+      deep_copy(args..., d_view, h_view);
+      modified_flags(1) = modified_flags(0) = 0;
+      impl_report_device_sync();
+    }
+  }
+
+  template <class ExecSpace>
+  void sync_device(const ExecSpace& exec) {
+    sync_device_impl(exec);
+  }
+  void sync_device() { sync_device_impl(); }
+
+  template <class Device>
+  bool need_sync() const {
+    if (modified_flags.data() == nullptr) return false;
+    int dev = get_device_side<Device>();
+
+    if (dev == 1) {  // if Device is the same as DualView's device type
+      if ((modified_flags(0) > 0) && (modified_flags(0) >= modified_flags(1))) {
+        return true;
+      }
+    }
+    if (dev == 0) {  // if Device is the same as DualView's host type
+      if ((modified_flags(1) > 0) && (modified_flags(1) >= modified_flags(0))) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  inline bool need_sync_host() const {
+    if (modified_flags.data() == nullptr) return false;
+    return modified_flags(0) < modified_flags(1);
+  }
+
+  inline bool need_sync_device() const {
+    if (modified_flags.data() == nullptr) return false;
+    return modified_flags(1) < modified_flags(0);
+  }
+  void impl_report_device_modification() {
+    if (Kokkos::Tools::Experimental::get_callbacks().modify_dual_view !=
+        nullptr) {
+      Kokkos::Tools::modifyDualView(
+          d_view.label(),
+          reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(d_view.data()) -
+                                  view_header_size),
+          true);
+    }
+  }
+  void impl_report_host_modification() {
+    if (Kokkos::Tools::Experimental::get_callbacks().modify_dual_view !=
+        nullptr) {
+      Kokkos::Tools::modifyDualView(
+          h_view.label(),
+          reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(h_view.data()) -
+                                  view_header_size),
+          false);
+    }
+  }
+  /// \brief Mark data as modified on the given device \c Device.
+  ///
+  /// If \c Device is the same as this DualView's device type, then
+  /// mark the device's data as modified.  Otherwise, mark the host's
+  /// data as modified.
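+  ///
+  /// For example (illustrative; assumes the device execution space is
+  /// Kokkos::DefaultExecutionSpace):
+  /// \code
+  ///   dv.modify<Kokkos::DefaultExecutionSpace>();  // device data is dirty
+  /// \endcode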
+  template <class Device, class Dummy = DualView,
+            std::enable_if_t<!Dummy::impl_dualview_is_single_device::value>* =
+                nullptr>
+  void modify() {
+    if (modified_flags.data() == nullptr) {
+      modified_flags = t_modified_flags("DualView::modified_flags");
+    }
+
+    int dev = get_device_side<Device>();
+
+    if (dev == 1) {  // if Device is the same as DualView's device type
+      // Increment the device's modified count.
+      modified_flags(1) =
+          (modified_flags(1) > modified_flags(0) ? modified_flags(1)
+                                                 : modified_flags(0)) +
+          1;
+      impl_report_device_modification();
+    }
+    if (dev == 0) {  // if Device is the same as DualView's host type
+      // Increment the host's modified count.
+      modified_flags(0) =
+          (modified_flags(1) > modified_flags(0) ? modified_flags(1)
+                                                 : modified_flags(0)) +
+          1;
+      impl_report_host_modification();
+    }
+
+#ifdef KOKKOS_ENABLE_DEBUG_DUALVIEW_MODIFY_CHECK
+    if (modified_flags(0) && modified_flags(1)) {
+      std::string msg = "Kokkos::DualView::modify ERROR: ";
+      msg += "Concurrent modification of host and device views ";
+      msg += "in DualView \"";
+      msg += d_view.label();
+      msg += "\"\n";
+      Kokkos::abort(msg.c_str());
+    }
+#endif
+  }
+
+  template <
+      class Device, class Dummy = DualView,
+      std::enable_if_t<Dummy::impl_dualview_is_single_device::value>* = nullptr>
+  void modify() {
+    return;
+  }
+
+  template <class Dummy = DualView,
+            std::enable_if_t<!Dummy::impl_dualview_is_single_device::value>* =
+                nullptr>
+  inline void modify_host() {
+    if (modified_flags.data() != nullptr) {
+      modified_flags(0) =
+          (modified_flags(1) > modified_flags(0) ? modified_flags(1)
+                                                 : modified_flags(0)) +
+          1;
+      impl_report_host_modification();
+#ifdef KOKKOS_ENABLE_DEBUG_DUALVIEW_MODIFY_CHECK
+      if (modified_flags(0) && modified_flags(1)) {
+        std::string msg = "Kokkos::DualView::modify_host ERROR: ";
+        msg += "Concurrent modification of host and device views ";
+        msg += "in DualView \"";
+        msg += d_view.label();
+        msg += "\"\n";
+        Kokkos::abort(msg.c_str());
+      }
+#endif
+    }
+  }
+
+  template <
+      class Dummy = DualView,
+      std::enable_if_t<Dummy::impl_dualview_is_single_device::value>* = nullptr>
+  inline void modify_host() {
+    return;
+  }
+
+  template <class Dummy = DualView,
+            std::enable_if_t<!Dummy::impl_dualview_is_single_device::value>* =
+                nullptr>
+  inline void modify_device() {
+    if (modified_flags.data() != nullptr) {
+      modified_flags(1) =
+          (modified_flags(1) > modified_flags(0) ? modified_flags(1)
+                                                 : modified_flags(0)) +
+          1;
+      impl_report_device_modification();
+#ifdef KOKKOS_ENABLE_DEBUG_DUALVIEW_MODIFY_CHECK
+      if (modified_flags(0) && modified_flags(1)) {
+        std::string msg = "Kokkos::DualView::modify_device ERROR: ";
+        msg += "Concurrent modification of host and device views ";
+        msg += "in DualView \"";
+        msg += d_view.label();
+        msg += "\"\n";
+        Kokkos::abort(msg.c_str());
+      }
+#endif
+    }
+  }
+
+  template <
+      class Dummy = DualView,
+      std::enable_if_t<Dummy::impl_dualview_is_single_device::value>* = nullptr>
+  inline void modify_device() {
+    return;
+  }
+
+  inline void clear_sync_state() {
+    if (modified_flags.data() != nullptr)
+      modified_flags(1) = modified_flags(0) = 0;
+  }
+
+  //@}
+  //! \name Methods for reallocating or resizing the View objects.
+  //@{
+
+  /// \brief Reallocate both View objects.
+  ///
+  /// This discards any existing contents of the objects, and resets
+  /// their modified flags.  It does <i>not</i> copy the old contents
+  /// of either View into the new View objects.
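+  ///
+  /// For example (illustrative; any previous contents are lost):
+  /// \code
+  ///   dv.realloc(200);
+  /// \endcode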
+  template <class... ViewCtorArgs>
+  void impl_realloc(const size_t n0, const size_t n1, const size_t n2,
+                    const size_t n3, const size_t n4, const size_t n5,
+                    const size_t n6, const size_t n7,
+                    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+    using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+    static_assert(!alloc_prop_input::has_label,
+                  "The view constructor arguments passed to Kokkos::realloc "
+                  "must not include a label!");
+    static_assert(
+        !alloc_prop_input::has_pointer,
+        "The view constructor arguments passed to Kokkos::realloc must "
+        "not include a pointer!");
+    static_assert(
+        !alloc_prop_input::has_memory_space,
+        "The view constructor arguments passed to Kokkos::realloc must "
+        "not include a memory space instance!");
+
+    const size_t new_extents[8] = {n0, n1, n2, n3, n4, n5, n6, n7};
+    const bool sizeMismatch =
+        Impl::size_mismatch(h_view, h_view.rank_dynamic, new_extents);
+
+    if (sizeMismatch) {
+      ::Kokkos::realloc(arg_prop, d_view, n0, n1, n2, n3, n4, n5, n6, n7);
+      if (alloc_prop_input::initialize) {
+        h_view = create_mirror_view(typename t_host::memory_space(), d_view);
+      } else {
+        h_view = create_mirror_view(Kokkos::WithoutInitializing,
+                                    typename t_host::memory_space(), d_view);
+      }
+    } else if (alloc_prop_input::initialize) {
+      if (alloc_prop_input::has_execution_space) {
+        // Add execution_space if not provided to avoid need for if constexpr
+        using alloc_prop = Impl::ViewCtorProp<
+            ViewCtorArgs...,
+            std::conditional_t<alloc_prop_input::has_execution_space,
+                               std::integral_constant<unsigned int, 2>,
+                               typename t_dev::execution_space>>;
+        alloc_prop arg_prop_copy(arg_prop);
+        using execution_space_type = typename alloc_prop::execution_space;
+        const execution_space_type& exec_space =
+            static_cast<
+                Kokkos::Impl::ViewCtorProp<void, execution_space_type> const&>(
+                arg_prop_copy)
+                .value;
+        ::Kokkos::deep_copy(exec_space, d_view, typename t_dev::value_type{});
+      } else
+        ::Kokkos::deep_copy(d_view, typename t_dev::value_type{});
+    }
+
+    /* Reset dirty flags */
+    if (modified_flags.data() == nullptr) {
+      modified_flags = t_modified_flags("DualView::modified_flags");
+    } else
+      modified_flags(1) = modified_flags(0) = 0;
+  }
+
+  template <class... ViewCtorArgs>
+  void realloc(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+               const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    impl_realloc(n0, n1, n2, n3, n4, n5, n6, n7, arg_prop);
+  }
+
+  void realloc(const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    impl_realloc(n0, n1, n2, n3, n4, n5, n6, n7, Impl::ViewCtorProp<>{});
+  }
+
+  template <typename I>
+  std::enable_if_t<Impl::is_view_ctor_property<I>::value> realloc(
+      const I& arg_prop, const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    impl_realloc(n0, n1, n2, n3, n4, n5, n6, n7, Kokkos::view_alloc(arg_prop));
+  }
+
+  /// \brief Resize both views, copying old contents into new if necessary.
+  ///
+  /// This method only copies the old contents into the new View
+  /// objects for the device which was last marked as modified.
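+  ///
+  /// For example (illustrative; the side last marked as modified is copied
+  /// into the new allocation):
+  /// \code
+  ///   dv.resize(2 * dv.extent(0));
+  /// \endcode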
+  template <class... ViewCtorArgs>
+  void impl_resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+                   const size_t n0, const size_t n1, const size_t n2,
+                   const size_t n3, const size_t n4, const size_t n5,
+                   const size_t n6, const size_t n7) {
+    using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+    static_assert(!alloc_prop_input::has_label,
+                  "The view constructor arguments passed to Kokkos::resize "
+                  "must not include a label!");
+    static_assert(
+        !alloc_prop_input::has_pointer,
+        "The view constructor arguments passed to Kokkos::resize must "
+        "not include a pointer!");
+    static_assert(
+        !alloc_prop_input::has_memory_space,
+        "The view constructor arguments passed to Kokkos::resize must "
+        "not include a memory space instance!");
+
+    const size_t new_extents[8] = {n0, n1, n2, n3, n4, n5, n6, n7};
+    const bool sizeMismatch =
+        Impl::size_mismatch(h_view, h_view.rank_dynamic, new_extents);
+
+    if (modified_flags.data() == nullptr) {
+      modified_flags = t_modified_flags("DualView::modified_flags");
+    }
+    if (modified_flags(1) >= modified_flags(0)) {
+      /* Resize on Device */
+      if (sizeMismatch) {
+        ::Kokkos::resize(arg_prop, d_view, n0, n1, n2, n3, n4, n5, n6, n7);
+        if (alloc_prop_input::initialize) {
+          h_view = create_mirror_view(typename t_host::memory_space(), d_view);
+        } else {
+          h_view = create_mirror_view(Kokkos::WithoutInitializing,
+                                      typename t_host::memory_space(), d_view);
+        }
+
+        /* Mark Device copy as modified */
+        ++modified_flags(1);
+      }
+    } else {
+      /* Resize on Host */
+      if (sizeMismatch) {
+        ::Kokkos::resize(arg_prop, h_view, n0, n1, n2, n3, n4, n5, n6, n7);
+        if (alloc_prop_input::initialize) {
+          d_view = create_mirror_view(typename t_dev::memory_space(), h_view);
+
+        } else {
+          d_view = create_mirror_view(Kokkos::WithoutInitializing,
+                                      typename t_dev::memory_space(), h_view);
+        }
+
+        /* Mark Host copy as modified */
+        ++modified_flags(0);
+      }
+    }
+  }
+
+  void resize(const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    impl_resize(Impl::ViewCtorProp<>{}, n0, n1, n2, n3, n4, n5, n6, n7);
+  }
+
+  template <class... ViewCtorArgs>
+  void resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+              const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    impl_resize(arg_prop, n0, n1, n2, n3, n4, n5, n6, n7);
+  }
+
+  template <class I>
+  std::enable_if_t<Impl::is_view_ctor_property<I>::value> resize(
+      const I& arg_prop, const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    impl_resize(Kokkos::view_alloc(arg_prop), n0, n1, n2, n3, n4, n5, n6, n7);
+  }
+
+  //@}
+  //! \name Methods for getting capacity, stride, or dimension(s).
+  //@{
+
+  //! The allocation size (same as Kokkos::View::span).
+  KOKKOS_INLINE_FUNCTION constexpr size_t span() const { return d_view.span(); }
+
+  KOKKOS_INLINE_FUNCTION bool span_is_contiguous() const {
+    return d_view.span_is_contiguous();
+  }
+
+  //! Get stride(s) for each dimension.
+  template <typename iType>
+  void stride(iType* stride_) const {
+    d_view.stride(stride_);
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
+      std::is_integral<iType>::value, size_t>
+  extent(const iType& r) const {
+    return d_view.extent(r);
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
+      std::is_integral<iType>::value, int>
+  extent_int(const iType& r) const {
+    return static_cast<int>(d_view.extent(r));
+  }
+
+  //@}
+};
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+//
+// Partial specializations of Kokkos::subview() for DualView objects.
+//
+
+namespace Kokkos {
+namespace Impl {
+
+template <class D, class A1, class A2, class A3, class... Args>
+struct DualViewSubview {
+  using dst_traits = typename Kokkos::Impl::ViewMapping<
+      void, Kokkos::ViewTraits<D, A1, A2, A3>, Args...>::traits_type;
+
+  using type = Kokkos::DualView<
+      typename dst_traits::data_type, typename dst_traits::array_layout,
+      typename dst_traits::device_type, typename dst_traits::memory_traits>;
+};
+
+} /* namespace Impl */
+
+template <class D, class A1, class A2, class A3, class... Args>
+typename Impl::DualViewSubview<D, A1, A2, A3, Args...>::type subview(
+    const DualView<D, A1, A2, A3>& src, Args... args) {
+  return typename Impl::DualViewSubview<D, A1, A2, A3, Args...>::type(src,
+                                                                      args...);
+}
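+
+// Example (illustrative): a column slice of a rank-2 DualView; both the host
+// and device sides of the result are the corresponding subviews:
+//
+//   Kokkos::DualView<double**> dv2d("dv2d", 100, 50);
+//   auto col0 = Kokkos::subview(dv2d, Kokkos::ALL, 0);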
+
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+//
+// Partial specialization of Kokkos::deep_copy() for DualView objects.
+//
+
+template <class DT, class DL, class DD, class DM, class ST, class SL, class SD,
+          class SM>
+void deep_copy(
+    DualView<DT, DL, DD, DM> dst,  // deliberately passed by value, not by
+                                   // reference
+    const DualView<ST, SL, SD, SM>& src) {
+  if (src.need_sync_device()) {
+    deep_copy(dst.h_view, src.h_view);
+    dst.modify_host();
+  } else {
+    deep_copy(dst.d_view, src.d_view);
+    dst.modify_device();
+  }
+}
+
+template <class ExecutionSpace, class DT, class DL, class DD, class DM,
+          class ST, class SL, class SD, class SM>
+void deep_copy(
+    const ExecutionSpace& exec,
+    DualView<DT, DL, DD, DM> dst,  // deliberately passed by value, not by
+                                   // reference
+    const DualView<ST, SL, SD, SM>& src) {
+  if (src.need_sync_device()) {
+    deep_copy(exec, dst.h_view, src.h_view);
+    dst.modify_host();
+  } else {
+    deep_copy(exec, dst.d_view, src.d_view);
+    dst.modify_device();
+  }
+}
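+
+// Example (illustrative): deep_copy copies whichever side of src is current
+// and marks the corresponding side of dst as modified:
+//
+//   Kokkos::DualView<double*> a("a", 100), b("b", 100);
+//   b.modify_device();        // device side of b is newest
+//   Kokkos::deep_copy(a, b);  // copies b.d_view -> a.d_view, a.modify_device()
+//   a.sync_host();            // make the host side of a current as well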
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+//
+// Non-member resize and realloc
+//
+
+template <class... Properties, class... Args>
+void resize(DualView<Properties...>& dv, Args&&... args) noexcept(
+    noexcept(dv.resize(std::forward<Args>(args)...))) {
+  dv.resize(std::forward<Args>(args)...);
+}
+
+template <class... ViewCtorArgs, class... Properties, class... Args>
+void resize(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    DualView<Properties...>& dv,
+    Args&&... args) noexcept(noexcept(dv.resize(arg_prop,
+                                                std::forward<Args>(args)...))) {
+  dv.resize(arg_prop, std::forward<Args>(args)...);
+}
+
+template <class I, class... Properties, class... Args>
+std::enable_if_t<Impl::is_view_ctor_property<I>::value> resize(
+    const I& arg_prop, DualView<Properties...>& dv,
+    Args&&... args) noexcept(noexcept(dv.resize(arg_prop,
+                                                std::forward<Args>(args)...))) {
+  dv.resize(arg_prop, std::forward<Args>(args)...);
+}
+
+template <class... ViewCtorArgs, class... Properties, class... Args>
+void realloc(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+             DualView<Properties...>& dv,
+             Args&&... args) noexcept(noexcept(dv
+                                                   .realloc(std::forward<Args>(
+                                                       args)...))) {
+  dv.realloc(arg_prop, std::forward<Args>(args)...);
+}
+
+template <class... Properties, class... Args>
+void realloc(DualView<Properties...>& dv, Args&&... args) noexcept(
+    noexcept(dv.realloc(std::forward<Args>(args)...))) {
+  dv.realloc(std::forward<Args>(args)...);
+}
+
+template <class I, class... Properties, class... Args>
+std::enable_if_t<Impl::is_view_ctor_property<I>::value> realloc(
+    const I& arg_prop, DualView<Properties...>& dv,
+    Args&&... args) noexcept(noexcept(dv.realloc(arg_prop,
+                                                 std::forward<Args>(
+                                                     args)...))) {
+  dv.realloc(arg_prop, std::forward<Args>(args)...);
+}
+
+}  // end namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DUALVIEW
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DUALVIEW
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/containers/src/Kokkos_DynRankView.hpp b/bundled/kokkos-3.7.00/containers/src/Kokkos_DynRankView.hpp
new file mode 100644 (file)
index 0000000..442f0d8
--- /dev/null
@@ -0,0 +1,2504 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/// \file Kokkos_DynRankView.hpp
+/// \brief Declaration and definition of Kokkos::DynRankView.
+///
+/// This header file declares and defines Kokkos::DynRankView and its
+/// related nonmember functions.
+
+#ifndef KOKKOS_DYNRANKVIEW_HPP
+#define KOKKOS_DYNRANKVIEW_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DYNRANKVIEW
+#endif
+
+#include <Kokkos_Core.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <type_traits>
+
+namespace Kokkos {
+
+template <typename DataType, class... Properties>
+class DynRankView;  // forward declare
+
+namespace Impl {
+
+template <typename Specialize>
+struct DynRankDimTraits {
+  enum : size_t { unspecified = KOKKOS_INVALID_INDEX };
+
+  // Compute the rank of the view from the dimension arguments that are
+  // specified (i.e., not equal to `unspecified`).
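+  // For example, with N0 = 10, N1 = 20, and N2 through N7 equal to
+  // `unspecified`, the computed rank is 2.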
+  KOKKOS_INLINE_FUNCTION
+  static size_t computeRank(const size_t N0, const size_t N1, const size_t N2,
+                            const size_t N3, const size_t N4, const size_t N5,
+                            const size_t N6, const size_t /* N7 */) {
+    return (
+        (N6 == unspecified && N5 == unspecified && N4 == unspecified &&
+         N3 == unspecified && N2 == unspecified && N1 == unspecified &&
+         N0 == unspecified)
+            ? 0
+            : ((N6 == unspecified && N5 == unspecified && N4 == unspecified &&
+                N3 == unspecified && N2 == unspecified && N1 == unspecified)
+                   ? 1
+                   : ((N6 == unspecified && N5 == unspecified &&
+                       N4 == unspecified && N3 == unspecified &&
+                       N2 == unspecified)
+                          ? 2
+                          : ((N6 == unspecified && N5 == unspecified &&
+                              N4 == unspecified && N3 == unspecified)
+                                 ? 3
+                                 : ((N6 == unspecified && N5 == unspecified &&
+                                     N4 == unspecified)
+                                        ? 4
+                                        : ((N6 == unspecified &&
+                                            N5 == unspecified)
+                                               ? 5
+                                               : ((N6 == unspecified)
+                                                      ? 6
+                                                      : 7)))))));
+  }
+
+  // Compute the rank of the view from the layout dimensions that are
+  // specified (i.e., not equal to `unspecified`).
+  template <typename Layout>
+  KOKKOS_INLINE_FUNCTION static size_t computeRank(const Layout& layout) {
+    return computeRank(layout.dimension[0], layout.dimension[1],
+                       layout.dimension[2], layout.dimension[3],
+                       layout.dimension[4], layout.dimension[5],
+                       layout.dimension[6], layout.dimension[7]);
+  }
+
+  // Extra overload to match that for specialize types v2
+  template <typename Layout, typename... P>
+  KOKKOS_INLINE_FUNCTION static size_t computeRank(
+      const Kokkos::Impl::ViewCtorProp<P...>& /* prop */,
+      const Layout& layout) {
+    return computeRank(layout);
+  }
+
+  // Create the layout for the rank-7 view.
+  // Non-strided Layout
+  template <typename Layout>
+  KOKKOS_INLINE_FUNCTION static std::enable_if_t<
+      (std::is_same<Layout, Kokkos::LayoutRight>::value ||
+       std::is_same<Layout, Kokkos::LayoutLeft>::value),
+      Layout>
+  createLayout(const Layout& layout) {
+    return Layout(layout.dimension[0] != unspecified ? layout.dimension[0] : 1,
+                  layout.dimension[1] != unspecified ? layout.dimension[1] : 1,
+                  layout.dimension[2] != unspecified ? layout.dimension[2] : 1,
+                  layout.dimension[3] != unspecified ? layout.dimension[3] : 1,
+                  layout.dimension[4] != unspecified ? layout.dimension[4] : 1,
+                  layout.dimension[5] != unspecified ? layout.dimension[5] : 1,
+                  layout.dimension[6] != unspecified ? layout.dimension[6] : 1,
+                  layout.dimension[7] != unspecified ? layout.dimension[7] : 1);
+  }
+
+  // LayoutStride
+  template <typename Layout>
+  KOKKOS_INLINE_FUNCTION static std::enable_if_t<
+      (std::is_same<Layout, Kokkos::LayoutStride>::value), Layout>
+  createLayout(const Layout& layout) {
+    return Layout(layout.dimension[0] != unspecified ? layout.dimension[0] : 1,
+                  layout.stride[0],
+                  layout.dimension[1] != unspecified ? layout.dimension[1] : 1,
+                  layout.stride[1],
+                  layout.dimension[2] != unspecified ? layout.dimension[2] : 1,
+                  layout.stride[2],
+                  layout.dimension[3] != unspecified ? layout.dimension[3] : 1,
+                  layout.stride[3],
+                  layout.dimension[4] != unspecified ? layout.dimension[4] : 1,
+                  layout.stride[4],
+                  layout.dimension[5] != unspecified ? layout.dimension[5] : 1,
+                  layout.stride[5],
+                  layout.dimension[6] != unspecified ? layout.dimension[6] : 1,
+                  layout.stride[6],
+                  layout.dimension[7] != unspecified ? layout.dimension[7] : 1,
+                  layout.stride[7]);
+  }
+
+  // Extra overload to match that for specialize types
+  template <typename Traits, typename... P>
+  KOKKOS_INLINE_FUNCTION static std::enable_if_t<
+      (std::is_same<typename Traits::array_layout,
+                    Kokkos::LayoutRight>::value ||
+       std::is_same<typename Traits::array_layout, Kokkos::LayoutLeft>::value ||
+       std::is_same<typename Traits::array_layout,
+                    Kokkos::LayoutStride>::value),
+      typename Traits::array_layout>
+  createLayout(const Kokkos::Impl::ViewCtorProp<P...>& /* prop */,
+               const typename Traits::array_layout& layout) {
+    return createLayout(layout);
+  }
+
+  // Create a view from the given dimension arguments.
+  // This is only necessary because the shmem constructor doesn't take a layout.
+  //   NDE: shmem Views are not compatible with the added view_alloc value_type
+  //   / fad_dim deduction functionality
+  template <typename ViewType, typename ViewArg>
+  static ViewType createView(const ViewArg& arg, const size_t N0,
+                             const size_t N1, const size_t N2, const size_t N3,
+                             const size_t N4, const size_t N5, const size_t N6,
+                             const size_t N7) {
+    return ViewType(arg, N0 != unspecified ? N0 : 1, N1 != unspecified ? N1 : 1,
+                    N2 != unspecified ? N2 : 1, N3 != unspecified ? N3 : 1,
+                    N4 != unspecified ? N4 : 1, N5 != unspecified ? N5 : 1,
+                    N6 != unspecified ? N6 : 1, N7 != unspecified ? N7 : 1);
+  }
+};
+
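+// reconstructLayout rebuilds a layout whose extents beyond `dynrank` are reset
+// to KOKKOS_INVALID_INDEX; e.g., for dynrank == 2 only dimension[0] and
+// dimension[1] are kept.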
+// Non-strided Layout
+template <typename Layout, typename iType>
+KOKKOS_INLINE_FUNCTION static std::enable_if_t<
+    (std::is_same<Layout, Kokkos::LayoutRight>::value ||
+     std::is_same<Layout, Kokkos::LayoutLeft>::value) &&
+        std::is_integral<iType>::value,
+    Layout>
+reconstructLayout(const Layout& layout, iType dynrank) {
+  return Layout(dynrank > 0 ? layout.dimension[0] : KOKKOS_INVALID_INDEX,
+                dynrank > 1 ? layout.dimension[1] : KOKKOS_INVALID_INDEX,
+                dynrank > 2 ? layout.dimension[2] : KOKKOS_INVALID_INDEX,
+                dynrank > 3 ? layout.dimension[3] : KOKKOS_INVALID_INDEX,
+                dynrank > 4 ? layout.dimension[4] : KOKKOS_INVALID_INDEX,
+                dynrank > 5 ? layout.dimension[5] : KOKKOS_INVALID_INDEX,
+                dynrank > 6 ? layout.dimension[6] : KOKKOS_INVALID_INDEX,
+                dynrank > 7 ? layout.dimension[7] : KOKKOS_INVALID_INDEX);
+}
+
+// LayoutStride
+template <typename Layout, typename iType>
+KOKKOS_INLINE_FUNCTION static std::enable_if_t<
+    (std::is_same<Layout, Kokkos::LayoutStride>::value) &&
+        std::is_integral<iType>::value,
+    Layout>
+reconstructLayout(const Layout& layout, iType dynrank) {
+  return Layout(dynrank > 0 ? layout.dimension[0] : KOKKOS_INVALID_INDEX,
+                dynrank > 0 ? layout.stride[0] : (0),
+                dynrank > 1 ? layout.dimension[1] : KOKKOS_INVALID_INDEX,
+                dynrank > 1 ? layout.stride[1] : (0),
+                dynrank > 2 ? layout.dimension[2] : KOKKOS_INVALID_INDEX,
+                dynrank > 2 ? layout.stride[2] : (0),
+                dynrank > 3 ? layout.dimension[3] : KOKKOS_INVALID_INDEX,
+                dynrank > 3 ? layout.stride[3] : (0),
+                dynrank > 4 ? layout.dimension[4] : KOKKOS_INVALID_INDEX,
+                dynrank > 4 ? layout.stride[4] : (0),
+                dynrank > 5 ? layout.dimension[5] : KOKKOS_INVALID_INDEX,
+                dynrank > 5 ? layout.stride[5] : (0),
+                dynrank > 6 ? layout.dimension[6] : KOKKOS_INVALID_INDEX,
+                dynrank > 6 ? layout.stride[6] : (0),
+                dynrank > 7 ? layout.dimension[7] : KOKKOS_INVALID_INDEX,
+                dynrank > 7 ? layout.stride[7] : (0));
+}
+
+/** \brief  Debug bounds-checking routines */
+// Enhanced debug checking - most infrastructure matches that of functions in
+// Kokkos_ViewMapping; additionally checks that arguments beyond the rank are 0
+template <unsigned, typename iType0, class MapType>
+KOKKOS_INLINE_FUNCTION bool dyn_rank_view_verify_operator_bounds(
+    const iType0&, const MapType&) {
+  return true;
+}
+
+template <unsigned R, typename iType0, class MapType, typename iType1,
+          class... Args>
+KOKKOS_INLINE_FUNCTION bool dyn_rank_view_verify_operator_bounds(
+    const iType0& rank, const MapType& map, const iType1& i, Args... args) {
+  if (static_cast<iType0>(R) < rank) {
+    return (size_t(i) < map.extent(R)) &&
+           dyn_rank_view_verify_operator_bounds<R + 1>(rank, map, args...);
+  } else if (i != 0) {
+    KOKKOS_IMPL_DO_NOT_USE_PRINTF(
+        "DynRankView Debug Bounds Checking Error: at rank %u\n  Extra "
+        "arguments beyond the rank must be zero \n",
+        R);
+    return (false) &&
+           dyn_rank_view_verify_operator_bounds<R + 1>(rank, map, args...);
+  } else {
+    return (true) &&
+           dyn_rank_view_verify_operator_bounds<R + 1>(rank, map, args...);
+  }
+}
+
+template <unsigned, class MapType>
+inline void dyn_rank_view_error_operator_bounds(char*, int, const MapType&) {}
+
+template <unsigned R, class MapType, class iType, class... Args>
+inline void dyn_rank_view_error_operator_bounds(char* buf, int len,
+                                                const MapType& map,
+                                                const iType& i, Args... args) {
+  const int n = snprintf(
+      buf, len, " %lu < %lu %c", static_cast<unsigned long>(i),
+      static_cast<unsigned long>(map.extent(R)), (sizeof...(Args) ? ',' : ')'));
+  dyn_rank_view_error_operator_bounds<R + 1>(buf + n, len - n, map, args...);
+}
+
+// op_rank = rank of the operator version that was called
+template <typename MemorySpace, typename iType0, typename iType1, class MapType,
+          class... Args>
+KOKKOS_INLINE_FUNCTION void dyn_rank_view_verify_operator_bounds(
+    const iType0& op_rank, const iType1& rank,
+    const Kokkos::Impl::SharedAllocationTracker& tracker, const MapType& map,
+    Args... args) {
+  if (static_cast<iType0>(rank) > op_rank) {
+    Kokkos::abort(
+        "DynRankView Bounds Checking Error: Need at least rank arguments to "
+        "the operator()");
+  }
+
+  if (!dyn_rank_view_verify_operator_bounds<0>(rank, map, args...)) {
+    KOKKOS_IF_ON_HOST(
+        (enum {LEN = 1024}; char buffer[LEN];
+         const std::string label = tracker.template get_label<MemorySpace>();
+         int n = snprintf(buffer, LEN, "DynRankView bounds error of view %s (",
+                          label.c_str());
+         dyn_rank_view_error_operator_bounds<0>(buffer + n, LEN - n, map,
+                                                args...);
+         Kokkos::Impl::throw_runtime_exception(std::string(buffer));))
+
+    KOKKOS_IF_ON_DEVICE(
+        ((void)tracker; Kokkos::abort("DynRankView bounds error");))
+  }
+}
+
+/** \brief  Assign compatible default mappings */
+struct ViewToDynRankViewTag {};
+
+}  // namespace Impl
+
+namespace Impl {
+
+template <class DstTraits, class SrcTraits>
+class ViewMapping<
+    DstTraits, SrcTraits,
+    std::enable_if_t<(std::is_same<typename DstTraits::memory_space,
+                                   typename SrcTraits::memory_space>::value &&
+                      std::is_void<typename DstTraits::specialize>::value &&
+                      std::is_void<typename SrcTraits::specialize>::value &&
+                      (std::is_same<typename DstTraits::array_layout,
+                                    typename SrcTraits::array_layout>::value ||
+                       ((std::is_same<typename DstTraits::array_layout,
+                                      Kokkos::LayoutLeft>::value ||
+                         std::is_same<typename DstTraits::array_layout,
+                                      Kokkos::LayoutRight>::value ||
+                         std::is_same<typename DstTraits::array_layout,
+                                      Kokkos::LayoutStride>::value) &&
+                        (std::is_same<typename SrcTraits::array_layout,
+                                      Kokkos::LayoutLeft>::value ||
+                         std::is_same<typename SrcTraits::array_layout,
+                                      Kokkos::LayoutRight>::value ||
+                         std::is_same<typename SrcTraits::array_layout,
+                                      Kokkos::LayoutStride>::value)))),
+                     Kokkos::Impl::ViewToDynRankViewTag>> {
+ private:
+  enum {
+    is_assignable_value_type =
+        std::is_same<typename DstTraits::value_type,
+                     typename SrcTraits::value_type>::value ||
+        std::is_same<typename DstTraits::value_type,
+                     typename SrcTraits::const_value_type>::value
+  };
+
+  enum {
+    is_assignable_layout =
+        std::is_same<typename DstTraits::array_layout,
+                     typename SrcTraits::array_layout>::value ||
+        std::is_same<typename DstTraits::array_layout,
+                     Kokkos::LayoutStride>::value
+  };
+
+ public:
+  enum { is_assignable = is_assignable_value_type && is_assignable_layout };
+
+  using DstType = ViewMapping<DstTraits, typename DstTraits::specialize>;
+  using SrcType = ViewMapping<SrcTraits, typename SrcTraits::specialize>;
+
+  template <typename DT, typename... DP, typename ST, typename... SP>
+  KOKKOS_INLINE_FUNCTION static void assign(
+      Kokkos::DynRankView<DT, DP...>& dst, const Kokkos::View<ST, SP...>& src) {
+    static_assert(
+        is_assignable_value_type,
+        "View assignment must have same value type or const = non-const");
+
+    static_assert(
+        is_assignable_layout,
+        "View assignment must have compatible layout or have rank <= 1");
+
+    // Removed dimension checks...
+
+    using dst_offset_type   = typename DstType::offset_type;
+    dst.m_map.m_impl_offset = dst_offset_type(
+        std::integral_constant<unsigned, 0>(),
+        src.layout());  // check this for integral inputs (padding, etc.)
+    dst.m_map.m_impl_handle = Kokkos::Impl::ViewDataHandle<DstTraits>::assign(
+        src.m_map.m_impl_handle, src.m_track.m_tracker);
+    dst.m_track.assign(src.m_track.m_tracker, DstTraits::is_managed);
+    dst.m_rank = src.Rank;
+  }
+};
+
+}  // namespace Impl
+
+/** \class DynRankView
+ * \brief Container that creates a Kokkos view whose rank is determined at
+ *   runtime. Essentially this is a rank-7 view.
+ *
+ *   Changes from View:
+ *   1. The rank of a DynRankView is returned by the method rank()
+ *   2. The maximum rank of a DynRankView is 7
+ *   3. Subviews are taken with 'subview(...)' or 'subdynrankview(...)'
+ *      (backward compatibility)
+ *   4. Every subview is returned with LayoutStride
+ *   5. Views can be copy-constructed and copy-assigned to DynRankViews
+ *   6. deep_copy works between Views and DynRankViews
+ *   7. rank(view) returns the rank of a View or DynRankView
+ */
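+//
+// Usage sketch (illustrative only; assumes the default host execution space):
+//
+//   Kokkos::DynRankView<double> a("A", 10, 3);    // rank deduced as 2
+//   a(5, 1) = 4.2;                                // rank-2 access
+//   auto s = Kokkos::subview(a, 5, Kokkos::ALL);  // rank 1, LayoutStride
+//   Kokkos::View<double**> b("B", 10, 3);
+//   a = b;                                        // copy-assign View -> DRV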
+
+template <class>
+struct is_dyn_rank_view : public std::false_type {};
+
+template <class D, class... P>
+struct is_dyn_rank_view<Kokkos::DynRankView<D, P...>> : public std::true_type {
+};
+
+template <typename DataType, class... Properties>
+class DynRankView : public ViewTraits<DataType, Properties...> {
+  static_assert(!std::is_array<DataType>::value &&
+                    !std::is_pointer<DataType>::value,
+                "Cannot template DynRankView with array or pointer datatype - "
+                "must be pod");
+
+ private:
+  template <class, class...>
+  friend class DynRankView;
+  template <class, class...>
+  friend class Kokkos::Impl::ViewMapping;
+
+ public:
+  using drvtraits = ViewTraits<DataType, Properties...>;
+
+  using view_type = View<DataType*******, Properties...>;
+
+  using traits = ViewTraits<DataType*******, Properties...>;
+
+ private:
+  using map_type =
+      Kokkos::Impl::ViewMapping<traits, typename traits::specialize>;
+  using track_type = Kokkos::Impl::SharedAllocationTracker;
+
+  track_type m_track;
+  map_type m_map;
+  unsigned m_rank;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  view_type& DownCast() const { return (view_type&)(*this); }
+  KOKKOS_INLINE_FUNCTION
+  const view_type& ConstDownCast() const { return (const view_type&)(*this); }
+
+  // Types below: at least the HostMirror requires the value_type, NOT the
+  // rank-7 data_type of the traits
+
+  /** \brief  Compatible view of array of scalar types */
+  using array_type = DynRankView<
+      typename drvtraits::scalar_array_type, typename drvtraits::array_layout,
+      typename drvtraits::device_type, typename drvtraits::memory_traits>;
+
+  /** \brief  Compatible view of const data type */
+  using const_type = DynRankView<
+      typename drvtraits::const_data_type, typename drvtraits::array_layout,
+      typename drvtraits::device_type, typename drvtraits::memory_traits>;
+
+  /** \brief  Compatible view of non-const data type */
+  using non_const_type = DynRankView<
+      typename drvtraits::non_const_data_type, typename drvtraits::array_layout,
+      typename drvtraits::device_type, typename drvtraits::memory_traits>;
+
+  /** \brief  Compatible HostMirror view */
+  using HostMirror = DynRankView<typename drvtraits::non_const_data_type,
+                                 typename drvtraits::array_layout,
+                                 typename drvtraits::host_mirror_space>;
+
+  //----------------------------------------
+  // Domain rank and extents
+
+  //  enum { Rank = map_type::Rank }; //Will be dyn rank of 7 always, keep the
+  //  enum?
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
+      std::is_integral<iType>::value, size_t>
+  extent(const iType& r) const {
+    return m_map.extent(r);
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
+      std::is_integral<iType>::value, int>
+  extent_int(const iType& r) const {
+    return static_cast<int>(m_map.extent(r));
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr typename traits::array_layout layout() const;
+
+  //----------------------------------------
+  /*  Deprecate all 'dimension' functions in favor of
+   *  ISO/C++ vocabulary 'extent'.
+   */
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t size() const {
+    return m_map.extent(0) * m_map.extent(1) * m_map.extent(2) *
+           m_map.extent(3) * m_map.extent(4) * m_map.extent(5) *
+           m_map.extent(6) * m_map.extent(7);
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_0() const {
+    return m_map.stride_0();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_1() const {
+    return m_map.stride_1();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_2() const {
+    return m_map.stride_2();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_3() const {
+    return m_map.stride_3();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_4() const {
+    return m_map.stride_4();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_5() const {
+    return m_map.stride_5();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_6() const {
+    return m_map.stride_6();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_7() const {
+    return m_map.stride_7();
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+    m_map.stride(s);
+  }
+
+  //----------------------------------------
+  // Range span is the span which contains all members.
+
+  using reference_type = typename map_type::reference_type;
+  using pointer_type   = typename map_type::pointer_type;
+
+  enum {
+    reference_type_is_lvalue_reference =
+        std::is_lvalue_reference<reference_type>::value
+  };
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t span() const { return m_map.span(); }
+  KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
+    return m_map.span_is_contiguous();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr pointer_type data() const {
+    return m_map.data();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
+    return (m_map.data() != nullptr);
+  }
+
+  //----------------------------------------
+  // Allow specializations to query their specialized map
+  KOKKOS_INLINE_FUNCTION
+  const Kokkos::Impl::ViewMapping<traits, typename traits::specialize>&
+  impl_map() const {
+    return m_map;
+  }
+
+  //----------------------------------------
+
+ private:
+  enum {
+    is_layout_left =
+        std::is_same<typename traits::array_layout, Kokkos::LayoutLeft>::value,
+
+    is_layout_right =
+        std::is_same<typename traits::array_layout, Kokkos::LayoutRight>::value,
+
+    is_layout_stride = std::is_same<typename traits::array_layout,
+                                    Kokkos::LayoutStride>::value,
+
+    is_default_map = std::is_void<typename traits::specialize>::value &&
+                     (is_layout_left || is_layout_right || is_layout_stride)
+  };
+
+// Bounds checking macros
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+
+// rank of the calling operator - included as first argument in ARG
+#define KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(ARG)                             \
+  Kokkos::Impl::runtime_check_memory_access_violation<                    \
+      typename traits::memory_space>(                                     \
+      "Kokkos::DynRankView ERROR: attempt to access inaccessible memory " \
+      "space");                                                           \
+  Kokkos::Impl::dyn_rank_view_verify_operator_bounds<                     \
+      typename traits::memory_space>                                      \
+      ARG;
+
+#else
+
+#define KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(ARG)                             \
+  Kokkos::Impl::runtime_check_memory_access_violation<                    \
+      typename traits::memory_space>(                                     \
+      "Kokkos::DynRankView ERROR: attempt to access inaccessible memory " \
+      "space");
+
+#endif
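+// Note: the bounds check above is compiled in only when Kokkos is configured
+// with the CMake option -DKokkos_ENABLE_DEBUG_BOUNDS_CHECK=ON; an
+// out-of-bounds access then calls Kokkos::abort with a descriptive message.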
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  constexpr unsigned rank() const { return m_rank; }
+
+  // operators ()
+  // Rank 0
+  KOKKOS_INLINE_FUNCTION
+  reference_type operator()() const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((0, this->rank(), m_track, m_map))
+    return impl_map().reference();
+    // return m_map.reference(0,0,0,0,0,0,0);
+  }
+
+  // Rank 1
+  // This assumes a contiguous underlying memory (i.e. no padding, no
+  // striding...)
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      std::is_same<typename drvtraits::value_type,
+                   typename drvtraits::scalar_array_type>::value &&
+          std::is_integral<iType>::value,
+      reference_type>
+  operator[](const iType& i0) const {
+    // Phalanx is violating this, since they use the operator to access ALL
+    // elements in the allocation; the check below is therefore disabled:
+    // KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((1, this->rank(), m_track, m_map))
+    return data()[i0];
+  }
+
+  // This assumes a contiguous underlying memory (i.e. no padding, no
+  // striding... AND a Trilinos/Sacado scalar type )
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      !std::is_same<typename drvtraits::value_type,
+                    typename drvtraits::scalar_array_type>::value &&
+          std::is_integral<iType>::value,
+      reference_type>
+  operator[](const iType& i0) const {
+    //      auto map = impl_map();
+    const size_t dim_scalar = m_map.dimension_scalar();
+    const size_t bytes      = this->span() / dim_scalar;
+
+    using tmp_view_type = Kokkos::View<
+        DataType*, typename traits::array_layout, typename traits::device_type,
+        Kokkos::MemoryTraits<traits::memory_traits::is_unmanaged |
+                             traits::memory_traits::is_random_access |
+                             traits::memory_traits::is_atomic>>;
+    tmp_view_type rankone_view(this->data(), bytes, dim_scalar);
+    return rankone_view(i0);
+  }
+
+  // Rank 1 parenthesis
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<(std::is_void<typename traits::specialize>::value &&
+                        std::is_integral<iType>::value),
+                       reference_type>
+      operator()(const iType& i0) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((1, this->rank(), m_track, m_map, i0))
+    return m_map.reference(i0);
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!(std::is_void<typename traits::specialize>::value &&
+                         std::is_integral<iType>::value),
+                       reference_type>
+      operator()(const iType& i0) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((1, this->rank(), m_track, m_map, i0))
+    return m_map.reference(i0, 0, 0, 0, 0, 0, 0);
+  }
+
+  // Rank 2
+  template <typename iType0, typename iType1>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      (std::is_void<typename traits::specialize>::value &&
+       std::is_integral<iType0>::value && std::is_integral<iType1>::value),
+      reference_type>
+  operator()(const iType0& i0, const iType1& i1) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((2, this->rank(), m_track, m_map, i0, i1))
+    return m_map.reference(i0, i1);
+  }
+
+  template <typename iType0, typename iType1>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
+                         std::is_integral<iType0>::value),
+                       reference_type>
+      operator()(const iType0& i0, const iType1& i1) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((2, this->rank(), m_track, m_map, i0, i1))
+    return m_map.reference(i0, i1, 0, 0, 0, 0, 0);
+  }
+
+  // Rank 3
+  template <typename iType0, typename iType1, typename iType2>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      (std::is_void<typename traits::specialize>::value &&
+       std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
+       std::is_integral<iType2>::value),
+      reference_type>
+  operator()(const iType0& i0, const iType1& i1, const iType2& i2) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (3, this->rank(), m_track, m_map, i0, i1, i2))
+    return m_map.reference(i0, i1, i2);
+  }
+
+  template <typename iType0, typename iType1, typename iType2>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
+                         std::is_integral<iType0>::value),
+                       reference_type>
+      operator()(const iType0& i0, const iType1& i1, const iType2& i2) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (3, this->rank(), m_track, m_map, i0, i1, i2))
+    return m_map.reference(i0, i1, i2, 0, 0, 0, 0);
+  }
+
+  // Rank 4
+  template <typename iType0, typename iType1, typename iType2, typename iType3>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      (std::is_void<typename traits::specialize>::value &&
+       std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
+       std::is_integral<iType2>::value && std::is_integral<iType3>::value),
+      reference_type>
+  operator()(const iType0& i0, const iType1& i1, const iType2& i2,
+             const iType3& i3) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (4, this->rank(), m_track, m_map, i0, i1, i2, i3))
+    return m_map.reference(i0, i1, i2, i3);
+  }
+
+  template <typename iType0, typename iType1, typename iType2, typename iType3>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
+                         std::is_integral<iType0>::value),
+                       reference_type>
+      operator()(const iType0& i0, const iType1& i1, const iType2& i2,
+                 const iType3& i3) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (4, this->rank(), m_track, m_map, i0, i1, i2, i3))
+    return m_map.reference(i0, i1, i2, i3, 0, 0, 0);
+  }
+
+  // Rank 5
+  template <typename iType0, typename iType1, typename iType2, typename iType3,
+            typename iType4>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      (std::is_void<typename traits::specialize>::value &&
+       std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
+       std::is_integral<iType2>::value && std::is_integral<iType3>::value &&
+       std::is_integral<iType4>::value),
+      reference_type>
+  operator()(const iType0& i0, const iType1& i1, const iType2& i2,
+             const iType3& i3, const iType4& i4) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (5, this->rank(), m_track, m_map, i0, i1, i2, i3, i4))
+    return m_map.reference(i0, i1, i2, i3, i4);
+  }
+
+  template <typename iType0, typename iType1, typename iType2, typename iType3,
+            typename iType4>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
+                         std::is_integral<iType0>::value),
+                       reference_type>
+      operator()(const iType0& i0, const iType1& i1, const iType2& i2,
+                 const iType3& i3, const iType4& i4) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (5, this->rank(), m_track, m_map, i0, i1, i2, i3, i4))
+    return m_map.reference(i0, i1, i2, i3, i4, 0, 0);
+  }
+
+  // Rank 6
+  template <typename iType0, typename iType1, typename iType2, typename iType3,
+            typename iType4, typename iType5>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      (std::is_void<typename traits::specialize>::value &&
+       std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
+       std::is_integral<iType2>::value && std::is_integral<iType3>::value &&
+       std::is_integral<iType4>::value && std::is_integral<iType5>::value),
+      reference_type>
+  operator()(const iType0& i0, const iType1& i1, const iType2& i2,
+             const iType3& i3, const iType4& i4, const iType5& i5) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (6, this->rank(), m_track, m_map, i0, i1, i2, i3, i4, i5))
+    return m_map.reference(i0, i1, i2, i3, i4, i5);
+  }
+
+  template <typename iType0, typename iType1, typename iType2, typename iType3,
+            typename iType4, typename iType5>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
+                         std::is_integral<iType0>::value),
+                       reference_type>
+      operator()(const iType0& i0, const iType1& i1, const iType2& i2,
+                 const iType3& i3, const iType4& i4, const iType5& i5) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (6, this->rank(), m_track, m_map, i0, i1, i2, i3, i4, i5))
+    return m_map.reference(i0, i1, i2, i3, i4, i5, 0);
+  }
+
+  // Rank 7
+  template <typename iType0, typename iType1, typename iType2, typename iType3,
+            typename iType4, typename iType5, typename iType6>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      (std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
+       std::is_integral<iType2>::value && std::is_integral<iType3>::value &&
+       std::is_integral<iType4>::value && std::is_integral<iType5>::value &&
+       std::is_integral<iType6>::value),
+      reference_type>
+  operator()(const iType0& i0, const iType1& i1, const iType2& i2,
+             const iType3& i3, const iType4& i4, const iType5& i5,
+             const iType6& i6) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (7, this->rank(), m_track, m_map, i0, i1, i2, i3, i4, i5, i6))
+    return m_map.reference(i0, i1, i2, i3, i4, i5, i6);
+  }
+
+  // Rank 0
+  KOKKOS_INLINE_FUNCTION
+  reference_type access() const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((0, this->rank(), m_track, m_map))
+    return impl_map().reference();
+    // return m_map.reference(0,0,0,0,0,0,0);
+  }
+
+  // Rank 1
+  // Rank 1 parenthesis
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<(std::is_void<typename traits::specialize>::value &&
+                        std::is_integral<iType>::value),
+                       reference_type>
+      access(const iType& i0) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((1, this->rank(), m_track, m_map, i0))
+    return m_map.reference(i0);
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!(std::is_void<typename traits::specialize>::value &&
+                         std::is_integral<iType>::value),
+                       reference_type>
+      access(const iType& i0) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((1, this->rank(), m_track, m_map, i0))
+    return m_map.reference(i0, 0, 0, 0, 0, 0, 0);
+  }
+
+  // Rank 2
+  template <typename iType0, typename iType1>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      (std::is_void<typename traits::specialize>::value &&
+       std::is_integral<iType0>::value && std::is_integral<iType1>::value),
+      reference_type>
+  access(const iType0& i0, const iType1& i1) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((2, this->rank(), m_track, m_map, i0, i1))
+    return m_map.reference(i0, i1);
+  }
+
+  template <typename iType0, typename iType1>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
+                         std::is_integral<iType0>::value),
+                       reference_type>
+      access(const iType0& i0, const iType1& i1) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY((2, this->rank(), m_track, m_map, i0, i1))
+    return m_map.reference(i0, i1, 0, 0, 0, 0, 0);
+  }
+
+  // Rank 3
+  template <typename iType0, typename iType1, typename iType2>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      (std::is_void<typename traits::specialize>::value &&
+       std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
+       std::is_integral<iType2>::value),
+      reference_type>
+  access(const iType0& i0, const iType1& i1, const iType2& i2) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (3, this->rank(), m_track, m_map, i0, i1, i2))
+    return m_map.reference(i0, i1, i2);
+  }
+
+  template <typename iType0, typename iType1, typename iType2>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
+                         std::is_integral<iType0>::value),
+                       reference_type>
+      access(const iType0& i0, const iType1& i1, const iType2& i2) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (3, this->rank(), m_track, m_map, i0, i1, i2))
+    return m_map.reference(i0, i1, i2, 0, 0, 0, 0);
+  }
+
+  // Rank 4
+  template <typename iType0, typename iType1, typename iType2, typename iType3>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      (std::is_void<typename traits::specialize>::value &&
+       std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
+       std::is_integral<iType2>::value && std::is_integral<iType3>::value),
+      reference_type>
+  access(const iType0& i0, const iType1& i1, const iType2& i2,
+         const iType3& i3) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (4, this->rank(), m_track, m_map, i0, i1, i2, i3))
+    return m_map.reference(i0, i1, i2, i3);
+  }
+
+  template <typename iType0, typename iType1, typename iType2, typename iType3>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
+                         std::is_integral<iType0>::value),
+                       reference_type>
+      access(const iType0& i0, const iType1& i1, const iType2& i2,
+             const iType3& i3) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (4, this->rank(), m_track, m_map, i0, i1, i2, i3))
+    return m_map.reference(i0, i1, i2, i3, 0, 0, 0);
+  }
+
+  // Rank 5
+  template <typename iType0, typename iType1, typename iType2, typename iType3,
+            typename iType4>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      (std::is_void<typename traits::specialize>::value &&
+       std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
+       std::is_integral<iType2>::value && std::is_integral<iType3>::value &&
+       std::is_integral<iType4>::value),
+      reference_type>
+  access(const iType0& i0, const iType1& i1, const iType2& i2, const iType3& i3,
+         const iType4& i4) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (5, this->rank(), m_track, m_map, i0, i1, i2, i3, i4))
+    return m_map.reference(i0, i1, i2, i3, i4);
+  }
+
+  template <typename iType0, typename iType1, typename iType2, typename iType3,
+            typename iType4>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
+                         std::is_integral<iType0>::value),
+                       reference_type>
+      access(const iType0& i0, const iType1& i1, const iType2& i2,
+             const iType3& i3, const iType4& i4) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (5, this->rank(), m_track, m_map, i0, i1, i2, i3, i4))
+    return m_map.reference(i0, i1, i2, i3, i4, 0, 0);
+  }
+
+  // Rank 6
+  template <typename iType0, typename iType1, typename iType2, typename iType3,
+            typename iType4, typename iType5>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      (std::is_void<typename traits::specialize>::value &&
+       std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
+       std::is_integral<iType2>::value && std::is_integral<iType3>::value &&
+       std::is_integral<iType4>::value && std::is_integral<iType5>::value),
+      reference_type>
+  access(const iType0& i0, const iType1& i1, const iType2& i2, const iType3& i3,
+         const iType4& i4, const iType5& i5) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (6, this->rank(), m_track, m_map, i0, i1, i2, i3, i4, i5))
+    return m_map.reference(i0, i1, i2, i3, i4, i5);
+  }
+
+  template <typename iType0, typename iType1, typename iType2, typename iType3,
+            typename iType4, typename iType5>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!(std::is_void<typename drvtraits::specialize>::value &&
+                         std::is_integral<iType0>::value),
+                       reference_type>
+      access(const iType0& i0, const iType1& i1, const iType2& i2,
+             const iType3& i3, const iType4& i4, const iType5& i5) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (6, this->rank(), m_track, m_map, i0, i1, i2, i3, i4, i5))
+    return m_map.reference(i0, i1, i2, i3, i4, i5, 0);
+  }
+
+  // Rank 7
+  template <typename iType0, typename iType1, typename iType2, typename iType3,
+            typename iType4, typename iType5, typename iType6>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      (std::is_integral<iType0>::value && std::is_integral<iType1>::value &&
+       std::is_integral<iType2>::value && std::is_integral<iType3>::value &&
+       std::is_integral<iType4>::value && std::is_integral<iType5>::value &&
+       std::is_integral<iType6>::value),
+      reference_type>
+  access(const iType0& i0, const iType1& i1, const iType2& i2, const iType3& i3,
+         const iType4& i4, const iType5& i5, const iType6& i6) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(
+        (7, this->rank(), m_track, m_map, i0, i1, i2, i3, i4, i5, i6))
+    return m_map.reference(i0, i1, i2, i3, i4, i5, i6);
+  }
+
+#undef KOKKOS_IMPL_VIEW_OPERATOR_VERIFY
+
+  //----------------------------------------
+  // Standard constructor, destructor, and assignment operators...
+
+  KOKKOS_DEFAULTED_FUNCTION
+  ~DynRankView() = default;
+
+  KOKKOS_INLINE_FUNCTION
+  DynRankView() : m_track(), m_map(), m_rank() {}  // Default ctor
+
+  KOKKOS_INLINE_FUNCTION
+  DynRankView(const DynRankView& rhs)
+      : m_track(rhs.m_track), m_map(rhs.m_map), m_rank(rhs.m_rank) {}
+
+  KOKKOS_INLINE_FUNCTION
+  DynRankView(DynRankView&& rhs)
+      : m_track(rhs.m_track), m_map(rhs.m_map), m_rank(rhs.m_rank) {}
+
+  KOKKOS_INLINE_FUNCTION
+  DynRankView& operator=(const DynRankView& rhs) {
+    m_track = rhs.m_track;
+    m_map   = rhs.m_map;
+    m_rank  = rhs.m_rank;
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  DynRankView& operator=(DynRankView&& rhs) {
+    m_track = rhs.m_track;
+    m_map   = rhs.m_map;
+    m_rank  = rhs.m_rank;
+    return *this;
+  }
+
+  //----------------------------------------
+  // Compatible view copy constructor and assignment;
+  // may assign unmanaged from managed.
+  template <class RT, class... RP>
+  KOKKOS_INLINE_FUNCTION DynRankView(const DynRankView<RT, RP...>& rhs)
+      : m_track(rhs.m_track, traits::is_managed), m_map(), m_rank(rhs.m_rank) {
+    using SrcTraits = typename DynRankView<RT, RP...>::traits;
+    using Mapping   = Kokkos::Impl::ViewMapping<traits, SrcTraits,
+                                              typename traits::specialize>;
+    static_assert(Mapping::is_assignable,
+                  "Incompatible DynRankView copy construction");
+    Mapping::assign(m_map, rhs.m_map, rhs.m_track);
+  }
+
+  template <class RT, class... RP>
+  KOKKOS_INLINE_FUNCTION DynRankView& operator=(
+      const DynRankView<RT, RP...>& rhs) {
+    using SrcTraits = typename DynRankView<RT, RP...>::traits;
+    using Mapping   = Kokkos::Impl::ViewMapping<traits, SrcTraits,
+                                              typename traits::specialize>;
+    static_assert(Mapping::is_assignable,
+                  "Incompatible DynRankView copy assignment");
+    Mapping::assign(m_map, rhs.m_map, rhs.m_track);
+    m_track.assign(rhs.m_track, traits::is_managed);
+    m_rank = rhs.rank();
+    return *this;
+  }
+
+  // Copy/Assign View to DynRankView
+  template <class RT, class... RP>
+  KOKKOS_INLINE_FUNCTION DynRankView(const View<RT, RP...>& rhs)
+      : m_track(), m_map(), m_rank(rhs.Rank) {
+    using SrcTraits = typename View<RT, RP...>::traits;
+    using Mapping =
+        Kokkos::Impl::ViewMapping<traits, SrcTraits,
+                                  Kokkos::Impl::ViewToDynRankViewTag>;
+    static_assert(Mapping::is_assignable,
+                  "Incompatible View to DynRankView copy construction");
+    Mapping::assign(*this, rhs);
+  }
+
+  template <class RT, class... RP>
+  KOKKOS_INLINE_FUNCTION DynRankView& operator=(const View<RT, RP...>& rhs) {
+    using SrcTraits = typename View<RT, RP...>::traits;
+    using Mapping =
+        Kokkos::Impl::ViewMapping<traits, SrcTraits,
+                                  Kokkos::Impl::ViewToDynRankViewTag>;
+    static_assert(Mapping::is_assignable,
+                  "Incompatible View to DynRankView copy assignment");
+    Mapping::assign(*this, rhs);
+    return *this;
+  }
+
+  //----------------------------------------
+  // Allocation tracking properties
+
+  KOKKOS_INLINE_FUNCTION
+  int use_count() const { return m_track.use_count(); }
+
+  inline const std::string label() const {
+    return m_track.template get_label<typename traits::memory_space>();
+  }
+
+  //----------------------------------------
+  // Allocation according to allocation properties and array layout;
+  // unused arg_layout dimensions must be set to KOKKOS_INVALID_INDEX so that
+  // rank deduction can take place properly
+  template <class... P>
+  explicit inline DynRankView(
+      const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+      std::enable_if_t<!Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
+                       typename traits::array_layout> const& arg_layout)
+      : m_track(),
+        m_map(),
+        m_rank(Impl::DynRankDimTraits<typename traits::specialize>::
+                   template computeRank<typename traits::array_layout, P...>(
+                       arg_prop, arg_layout)) {
+    // Append layout and spaces if not input
+    using alloc_prop_input = Kokkos::Impl::ViewCtorProp<P...>;
+
+    // use 'std::integral_constant<unsigned,I>' for non-types
+    // to avoid duplicate class error.
+    using alloc_prop = Kokkos::Impl::ViewCtorProp<
+        P...,
+        std::conditional_t<alloc_prop_input::has_label,
+                           std::integral_constant<unsigned, 0>, std::string>,
+        std::conditional_t<alloc_prop_input::has_memory_space,
+                           std::integral_constant<unsigned, 1>,
+                           typename traits::device_type::memory_space>,
+        std::conditional_t<alloc_prop_input::has_execution_space,
+                           std::integral_constant<unsigned, 2>,
+                           typename traits::device_type::execution_space>>;
+
+    static_assert(traits::is_managed,
+                  "View allocation constructor requires managed memory");
+
+    if (alloc_prop::initialize &&
+        !alloc_prop::execution_space::impl_is_initialized()) {
+      // If initializing view data then
+      // the execution space must be initialized.
+      Kokkos::Impl::throw_runtime_exception(
+          "Constructing DynRankView and initializing data with uninitialized "
+          "execution space");
+    }
+
+    // Copy the input allocation properties with possibly defaulted properties
+    alloc_prop prop_copy(arg_prop);
+
+//------------------------------------------------------------
+#if defined(KOKKOS_ENABLE_CUDA)
+    // If allocating in CudaUVMSpace must fence before and after
+    // the allocation to protect against possible concurrent access
+    // on the CPU and the GPU.
+    // Fence using the trait's execution space (which will be Kokkos::Cuda)
+    // to avoid incomplete type errors from using Kokkos::Cuda directly.
+    if (std::is_same<Kokkos::CudaUVMSpace,
+                     typename traits::device_type::memory_space>::value) {
+      typename traits::device_type::memory_space::execution_space().fence(
+          "Kokkos::DynRankView<>::DynRankView: fence before UVM allocation");
+    }
+#endif
+    //------------------------------------------------------------
+
+    Kokkos::Impl::SharedAllocationRecord<>* record = m_map.allocate_shared(
+        prop_copy,
+        Impl::DynRankDimTraits<typename traits::specialize>::
+            template createLayout<traits, P...>(arg_prop, arg_layout),
+        Impl::ViewCtorProp<P...>::has_execution_space);
+
+//------------------------------------------------------------
+#if defined(KOKKOS_ENABLE_CUDA)
+    if (std::is_same<Kokkos::CudaUVMSpace,
+                     typename traits::device_type::memory_space>::value) {
+      typename traits::device_type::memory_space::execution_space().fence(
+          "Kokkos::DynRankView<>::DynRankView: fence after UVM allocation");
+    }
+#endif
+    //------------------------------------------------------------
+
+    // Setup and initialization complete, start tracking
+    m_track.assign_allocated_record_to_uninitialized(record);
+  }
+
+  // Wrapper constructors for user-provided (unmanaged) memory
+  template <class... P>
+  explicit KOKKOS_INLINE_FUNCTION DynRankView(
+      const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+      std::enable_if_t<Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
+                       typename traits::array_layout> const& arg_layout)
+      : m_track()  // No memory tracking
+        ,
+        m_map(arg_prop,
+              Impl::DynRankDimTraits<typename traits::specialize>::
+                  template createLayout<traits, P...>(arg_prop, arg_layout)),
+        m_rank(Impl::DynRankDimTraits<typename traits::specialize>::
+                   template computeRank<typename traits::array_layout, P...>(
+                       arg_prop, arg_layout)) {
+    static_assert(
+        std::is_same<pointer_type,
+                     typename Impl::ViewCtorProp<P...>::pointer_type>::value,
+        "Constructing DynRankView to wrap user memory must supply matching "
+        "pointer type");
+  }
+
+  //----------------------------------------
+  // Constructor(s)
+
+  // Simple dimension-only layout
+  template <class... P>
+  explicit inline DynRankView(
+      const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+      std::enable_if_t<!Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
+                       size_t> const arg_N0 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N1                   = KOKKOS_INVALID_INDEX,
+      const size_t arg_N2                   = KOKKOS_INVALID_INDEX,
+      const size_t arg_N3                   = KOKKOS_INVALID_INDEX,
+      const size_t arg_N4                   = KOKKOS_INVALID_INDEX,
+      const size_t arg_N5                   = KOKKOS_INVALID_INDEX,
+      const size_t arg_N6                   = KOKKOS_INVALID_INDEX,
+      const size_t arg_N7                   = KOKKOS_INVALID_INDEX)
+      : DynRankView(arg_prop, typename traits::array_layout(
+                                  arg_N0, arg_N1, arg_N2, arg_N3, arg_N4,
+                                  arg_N5, arg_N6, arg_N7)) {}
+
+  template <class... P>
+  explicit KOKKOS_INLINE_FUNCTION DynRankView(
+      const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+      std::enable_if_t<Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
+                       size_t> const arg_N0 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N1                   = KOKKOS_INVALID_INDEX,
+      const size_t arg_N2                   = KOKKOS_INVALID_INDEX,
+      const size_t arg_N3                   = KOKKOS_INVALID_INDEX,
+      const size_t arg_N4                   = KOKKOS_INVALID_INDEX,
+      const size_t arg_N5                   = KOKKOS_INVALID_INDEX,
+      const size_t arg_N6                   = KOKKOS_INVALID_INDEX,
+      const size_t arg_N7                   = KOKKOS_INVALID_INDEX)
+      : DynRankView(arg_prop, typename traits::array_layout(
+                                  arg_N0, arg_N1, arg_N2, arg_N3, arg_N4,
+                                  arg_N5, arg_N6, arg_N7)) {}
+
+  // Allocate with label and layout
+  template <typename Label>
+  explicit inline DynRankView(
+      const Label& arg_label,
+      std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value,
+                       typename traits::array_layout> const& arg_layout)
+      : DynRankView(Kokkos::Impl::ViewCtorProp<std::string>(arg_label),
+                    arg_layout) {}
+
+  // Allocate label and layout, must disambiguate from subview constructor
+  template <typename Label>
+  explicit inline DynRankView(
+      const Label& arg_label,
+      std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value, const size_t>
+          arg_N0          = KOKKOS_INVALID_INDEX,
+      const size_t arg_N1 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N2 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N3 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N4 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N5 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N6 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N7 = KOKKOS_INVALID_INDEX)
+      : DynRankView(
+            Kokkos::Impl::ViewCtorProp<std::string>(arg_label),
+            typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
+                                          arg_N4, arg_N5, arg_N6, arg_N7)) {}
+
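+  // Example (sketch): trailing dimensions left at KOKKOS_INVALID_INDEX are
+  // treated as absent, so the runtime rank follows from the arguments given:
+  //
+  //   DynRankView<int> v("v", 4, 5);  // rank() == 2
+  //   DynRankView<int> w("w");        // rank() == 0
+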
+  //----------------------------------------
+  // Memory span required to wrap these dimensions.
+  static constexpr size_t required_allocation_size(
+      const size_t arg_N0 = 0, const size_t arg_N1 = 0, const size_t arg_N2 = 0,
+      const size_t arg_N3 = 0, const size_t arg_N4 = 0, const size_t arg_N5 = 0,
+      const size_t arg_N6 = 0, const size_t arg_N7 = 0) {
+    return map_type::memory_span(typename traits::array_layout(
+        arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6, arg_N7));
+  }
+
+  explicit KOKKOS_INLINE_FUNCTION DynRankView(
+      pointer_type arg_ptr, const size_t arg_N0 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N1 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N2 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N3 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N4 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N5 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N6 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N7 = KOKKOS_INVALID_INDEX)
+      : DynRankView(Kokkos::Impl::ViewCtorProp<pointer_type>(arg_ptr), arg_N0,
+                    arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6, arg_N7) {}
+
+  explicit KOKKOS_INLINE_FUNCTION DynRankView(
+      pointer_type arg_ptr, typename traits::array_layout& arg_layout)
+      : DynRankView(Kokkos::Impl::ViewCtorProp<pointer_type>(arg_ptr),
+                    arg_layout) {}
+
+  //----------------------------------------
+  // Shared scratch memory constructor
+
+  static inline size_t shmem_size(const size_t arg_N0 = KOKKOS_INVALID_INDEX,
+                                  const size_t arg_N1 = KOKKOS_INVALID_INDEX,
+                                  const size_t arg_N2 = KOKKOS_INVALID_INDEX,
+                                  const size_t arg_N3 = KOKKOS_INVALID_INDEX,
+                                  const size_t arg_N4 = KOKKOS_INVALID_INDEX,
+                                  const size_t arg_N5 = KOKKOS_INVALID_INDEX,
+                                  const size_t arg_N6 = KOKKOS_INVALID_INDEX,
+                                  const size_t arg_N7 = KOKKOS_INVALID_INDEX) {
+    const size_t num_passed_args =
+        (arg_N0 != KOKKOS_INVALID_INDEX) + (arg_N1 != KOKKOS_INVALID_INDEX) +
+        (arg_N2 != KOKKOS_INVALID_INDEX) + (arg_N3 != KOKKOS_INVALID_INDEX) +
+        (arg_N4 != KOKKOS_INVALID_INDEX) + (arg_N5 != KOKKOS_INVALID_INDEX) +
+        (arg_N6 != KOKKOS_INVALID_INDEX) + (arg_N7 != KOKKOS_INVALID_INDEX);
+
+    if (std::is_void<typename traits::specialize>::value &&
+        num_passed_args != traits::rank_dynamic) {
+      Kokkos::abort(
+          "Kokkos::View::shmem_size() rank_dynamic != number of arguments.\n");
+    }
+
+    return map_type::memory_span(typename traits::array_layout(
+        arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6, arg_N7));
+  }
+
+  explicit KOKKOS_INLINE_FUNCTION DynRankView(
+      const typename traits::execution_space::scratch_memory_space& arg_space,
+      const typename traits::array_layout& arg_layout)
+      : DynRankView(
+            Kokkos::Impl::ViewCtorProp<pointer_type>(
+                reinterpret_cast<pointer_type>(
+                    arg_space.get_shmem(map_type::memory_span(
+                        Impl::DynRankDimTraits<typename traits::specialize>::
+                            createLayout(arg_layout)  // is this correct?
+                        )))),
+            arg_layout) {}
+
+  explicit KOKKOS_INLINE_FUNCTION DynRankView(
+      const typename traits::execution_space::scratch_memory_space& arg_space,
+      const size_t arg_N0 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N1 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N2 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N3 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N4 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N5 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N6 = KOKKOS_INVALID_INDEX,
+      const size_t arg_N7 = KOKKOS_INVALID_INDEX)
+
+      : DynRankView(
+            Kokkos::Impl::ViewCtorProp<pointer_type>(
+                reinterpret_cast<pointer_type>(
+                    arg_space.get_shmem(map_type::memory_span(
+                        Impl::DynRankDimTraits<typename traits::specialize>::
+                            createLayout(typename traits::array_layout(
+                                arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5,
+                                arg_N6, arg_N7)))))),
+            typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
+                                          arg_N4, arg_N5, arg_N6, arg_N7)) {}
+};
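+
+// Scratch-memory sketch (illustrative; 'team' stands in for a hypothetical
+// TeamPolicy member handle inside a parallel kernel):
+//
+//   Kokkos::DynRankView<double> tmp(team.team_scratch(0), 8, 8);  // rank 2
+//
+// The scratch constructors above compute the required span from the given
+// layout and carve it out of the scratch space via get_shmem().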
+
+template <typename D, class... P>
+KOKKOS_INLINE_FUNCTION constexpr unsigned rank(
+    const DynRankView<D, P...>& DRV) {
+  return DRV.rank();
+}  // needed for transition to common constexpr method in view and dynrankview
+   // to return rank
+
+//----------------------------------------------------------------------------
+// Subview mapping.
+// Deduce destination view type from source view traits and subview arguments
+
+namespace Impl {
+
+struct DynRankSubviewTag {};
+
+}  // namespace Impl
+
+namespace Impl {
+
+template <class SrcTraits, class... Args>
+class ViewMapping<
+    std::enable_if_t<(std::is_void<typename SrcTraits::specialize>::value &&
+                      (std::is_same<typename SrcTraits::array_layout,
+                                    Kokkos::LayoutLeft>::value ||
+                       std::is_same<typename SrcTraits::array_layout,
+                                    Kokkos::LayoutRight>::value ||
+                       std::is_same<typename SrcTraits::array_layout,
+                                    Kokkos::LayoutStride>::value)),
+                     Kokkos::Impl::DynRankSubviewTag>,
+    SrcTraits, Args...> {
+ private:
+  enum {
+    RZ = false,
+    R0 = bool(is_integral_extent<0, Args...>::value),
+    R1 = bool(is_integral_extent<1, Args...>::value),
+    R2 = bool(is_integral_extent<2, Args...>::value),
+    R3 = bool(is_integral_extent<3, Args...>::value),
+    R4 = bool(is_integral_extent<4, Args...>::value),
+    R5 = bool(is_integral_extent<5, Args...>::value),
+    R6 = bool(is_integral_extent<6, Args...>::value)
+  };
+
+  enum {
+    rank = unsigned(R0) + unsigned(R1) + unsigned(R2) + unsigned(R3) +
+           unsigned(R4) + unsigned(R5) + unsigned(R6)
+  };
+
+  using array_layout = Kokkos::LayoutStride;
+
+  using value_type = typename SrcTraits::value_type;
+
+  using data_type = value_type*******;
+
+ public:
+  using traits_type = Kokkos::ViewTraits<data_type, array_layout,
+                                         typename SrcTraits::device_type,
+                                         typename SrcTraits::memory_traits>;
+
+  using type =
+      Kokkos::View<data_type, array_layout, typename SrcTraits::device_type,
+                   typename SrcTraits::memory_traits>;
+
+  template <class MemoryTraits>
+  struct apply {
+    static_assert(Kokkos::is_memory_traits<MemoryTraits>::value, "");
+
+    using traits_type =
+        Kokkos::ViewTraits<data_type, array_layout,
+                           typename SrcTraits::device_type, MemoryTraits>;
+
+    using type = Kokkos::View<data_type, array_layout,
+                              typename SrcTraits::device_type, MemoryTraits>;
+  };
+
+  using dimension = typename SrcTraits::dimension;
+
+  template <class Arg0 = int, class Arg1 = int, class Arg2 = int,
+            class Arg3 = int, class Arg4 = int, class Arg5 = int,
+            class Arg6 = int>
+  struct ExtentGenerator {
+    KOKKOS_INLINE_FUNCTION
+    static SubviewExtents<7, rank> generator(
+        const dimension& dim, Arg0 arg0 = Arg0(), Arg1 arg1 = Arg1(),
+        Arg2 arg2 = Arg2(), Arg3 arg3 = Arg3(), Arg4 arg4 = Arg4(),
+        Arg5 arg5 = Arg5(), Arg6 arg6 = Arg6()) {
+      return SubviewExtents<7, rank>(dim, arg0, arg1, arg2, arg3, arg4, arg5,
+                                     arg6);
+    }
+  };
+
+  using ret_type = Kokkos::DynRankView<value_type, array_layout,
+                                       typename SrcTraits::device_type,
+                                       typename SrcTraits::memory_traits>;
+
+  template <typename T, class... P>
+  KOKKOS_INLINE_FUNCTION static ret_type subview(
+      const unsigned src_rank, Kokkos::DynRankView<T, P...> const& src,
+      Args... args) {
+    using DstType = ViewMapping<traits_type, typename traits_type::specialize>;
+
+    using DstDimType = std::conditional_t<
+        (rank == 0), ViewDimension<>,
+        std::conditional_t<
+            (rank == 1), ViewDimension<0>,
+            std::conditional_t<
+                (rank == 2), ViewDimension<0, 0>,
+                std::conditional_t<
+                    (rank == 3), ViewDimension<0, 0, 0>,
+                    std::conditional_t<
+                        (rank == 4), ViewDimension<0, 0, 0, 0>,
+                        std::conditional_t<
+                            (rank == 5), ViewDimension<0, 0, 0, 0, 0>,
+                            std::conditional_t<
+                                (rank == 6), ViewDimension<0, 0, 0, 0, 0, 0>,
+                                ViewDimension<0, 0, 0, 0, 0, 0, 0>>>>>>>>;
+
+    using dst_offset_type = ViewOffset<DstDimType, Kokkos::LayoutStride>;
+    using dst_handle_type = typename DstType::handle_type;
+
+    ret_type dst;
+
+    const SubviewExtents<7, rank> extents = ExtentGenerator<Args...>::generator(
+        src.m_map.m_impl_offset.m_dim, args...);
+
+    dst_offset_type tempdst(src.m_map.m_impl_offset, extents);
+
+    dst.m_track = src.m_track;
+
+    dst.m_map.m_impl_offset.m_dim.N0 = tempdst.m_dim.N0;
+    dst.m_map.m_impl_offset.m_dim.N1 = tempdst.m_dim.N1;
+    dst.m_map.m_impl_offset.m_dim.N2 = tempdst.m_dim.N2;
+    dst.m_map.m_impl_offset.m_dim.N3 = tempdst.m_dim.N3;
+    dst.m_map.m_impl_offset.m_dim.N4 = tempdst.m_dim.N4;
+    dst.m_map.m_impl_offset.m_dim.N5 = tempdst.m_dim.N5;
+    dst.m_map.m_impl_offset.m_dim.N6 = tempdst.m_dim.N6;
+
+    dst.m_map.m_impl_offset.m_stride.S0 = tempdst.m_stride.S0;
+    dst.m_map.m_impl_offset.m_stride.S1 = tempdst.m_stride.S1;
+    dst.m_map.m_impl_offset.m_stride.S2 = tempdst.m_stride.S2;
+    dst.m_map.m_impl_offset.m_stride.S3 = tempdst.m_stride.S3;
+    dst.m_map.m_impl_offset.m_stride.S4 = tempdst.m_stride.S4;
+    dst.m_map.m_impl_offset.m_stride.S5 = tempdst.m_stride.S5;
+    dst.m_map.m_impl_offset.m_stride.S6 = tempdst.m_stride.S6;
+
+    dst.m_map.m_impl_handle =
+        dst_handle_type(src.m_map.m_impl_handle +
+                        src.m_map.m_impl_offset(
+                            extents.domain_offset(0), extents.domain_offset(1),
+                            extents.domain_offset(2), extents.domain_offset(3),
+                            extents.domain_offset(4), extents.domain_offset(5),
+                            extents.domain_offset(6)));
+
+    dst.m_rank =
+        (src_rank > 0 ? unsigned(R0) : 0) + (src_rank > 1 ? unsigned(R1) : 0) +
+        (src_rank > 2 ? unsigned(R2) : 0) + (src_rank > 3 ? unsigned(R3) : 0) +
+        (src_rank > 4 ? unsigned(R4) : 0) + (src_rank > 5 ? unsigned(R5) : 0) +
+        (src_rank > 6 ? unsigned(R6) : 0);
+
+    return dst;
+  }
+};
+
+}  // namespace Impl
+
+template <class V, class... Args>
+using Subdynrankview =
+    typename Kokkos::Impl::ViewMapping<Kokkos::Impl::DynRankSubviewTag, V,
+                                       Args...>::ret_type;
+
+template <class D, class... P, class... Args>
+KOKKOS_INLINE_FUNCTION Subdynrankview<ViewTraits<D*******, P...>, Args...>
+subdynrankview(const Kokkos::DynRankView<D, P...>& src, Args... args) {
+  // sizeof...(Args) >= src.rank() is allowed; any extra arguments are ignored
+  if (src.rank() > sizeof...(Args)) {
+    Kokkos::abort(
+        "subdynrankview: number of arguments must be >= rank of the source "
+        "DynRankView");
+  }
+
+  using metafcn =
+      Kokkos::Impl::ViewMapping<Kokkos::Impl::DynRankSubviewTag,
+                                Kokkos::ViewTraits<D*******, P...>, Args...>;
+
+  return metafcn::subview(src.rank(), src, args...);
+}
+
+// Wrapper to allow subview function name
+template <class D, class... P, class... Args>
+KOKKOS_INLINE_FUNCTION Subdynrankview<ViewTraits<D*******, P...>, Args...>
+subview(const Kokkos::DynRankView<D, P...>& src, Args... args) {
+  return subdynrankview(src, args...);
+}
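+
+// Sketch (illustrative): integral arguments drop a rank, ranges keep one:
+//
+//   Kokkos::DynRankView<double> a("A", 4, 5, 6);  // rank 3
+//   auto s = Kokkos::subview(a, 2, Kokkos::ALL, Kokkos::make_pair(1, 4));
+//   // s.rank() == 2, s.extent(0) == 5, s.extent(1) == 3, LayoutStride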
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+// overload == and !=
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator==(const DynRankView<LT, LP...>& lhs,
+                                       const DynRankView<RT, RP...>& rhs) {
+  // Same data, layout, dimensions
+  using lhs_traits = ViewTraits<LT, LP...>;
+  using rhs_traits = ViewTraits<RT, RP...>;
+
+  return std::is_same<typename lhs_traits::const_value_type,
+                      typename rhs_traits::const_value_type>::value &&
+         std::is_same<typename lhs_traits::array_layout,
+                      typename rhs_traits::array_layout>::value &&
+         std::is_same<typename lhs_traits::memory_space,
+                      typename rhs_traits::memory_space>::value &&
+         lhs.rank() == rhs.rank() && lhs.data() == rhs.data() &&
+         lhs.span() == rhs.span() && lhs.extent(0) == rhs.extent(0) &&
+         lhs.extent(1) == rhs.extent(1) && lhs.extent(2) == rhs.extent(2) &&
+         lhs.extent(3) == rhs.extent(3) && lhs.extent(4) == rhs.extent(4) &&
+         lhs.extent(5) == rhs.extent(5) && lhs.extent(6) == rhs.extent(6) &&
+         lhs.extent(7) == rhs.extent(7);
+}
+
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator!=(const DynRankView<LT, LP...>& lhs,
+                                       const DynRankView<RT, RP...>& rhs) {
+  return !(operator==(lhs, rhs));
+}
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+namespace Kokkos {
+namespace Impl {
+
+template <class OutputView, class Enable = void>
+struct DynRankViewFill {
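+  // Fills 'output' with 'input': parallel_for over the leading extent in the
+  // constructor, with the remaining (up to six) extents looped in operator().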
+  using const_value_type = typename OutputView::traits::const_value_type;
+
+  const OutputView output;
+  const_value_type input;
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const size_t i0) const {
+    const size_t n1 = output.extent(1);
+    const size_t n2 = output.extent(2);
+    const size_t n3 = output.extent(3);
+    const size_t n4 = output.extent(4);
+    const size_t n5 = output.extent(5);
+    const size_t n6 = output.extent(6);
+
+    for (size_t i1 = 0; i1 < n1; ++i1) {
+      for (size_t i2 = 0; i2 < n2; ++i2) {
+        for (size_t i3 = 0; i3 < n3; ++i3) {
+          for (size_t i4 = 0; i4 < n4; ++i4) {
+            for (size_t i5 = 0; i5 < n5; ++i5) {
+              for (size_t i6 = 0; i6 < n6; ++i6) {
+                output.access(i0, i1, i2, i3, i4, i5, i6) = input;
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  DynRankViewFill(const OutputView& arg_out, const_value_type& arg_in)
+      : output(arg_out), input(arg_in) {
+    using execution_space = typename OutputView::execution_space;
+    using Policy          = Kokkos::RangePolicy<execution_space>;
+
+    Kokkos::parallel_for("Kokkos::DynRankViewFill", Policy(0, output.extent(0)),
+                         *this);
+  }
+};
+
+template <class OutputView>
+struct DynRankViewFill<OutputView, std::enable_if_t<OutputView::Rank == 0>> {
+  DynRankViewFill(const OutputView& dst,
+                  const typename OutputView::const_value_type& src) {
+    Kokkos::Impl::DeepCopy<typename OutputView::memory_space,
+                           Kokkos::HostSpace>(
+        dst.data(), &src, sizeof(typename OutputView::const_value_type));
+  }
+};
+
+template <class OutputView, class InputView,
+          class ExecSpace = typename OutputView::execution_space>
+struct DynRankViewRemap {
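+  // Copies the overlapping index range of 'input' into 'output':
+  // parallel_for over the leading extent, remaining extents looped in
+  // operator(); each extent is clamped to the smaller of the two views.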
+  const OutputView output;
+  const InputView input;
+  const size_t n0;
+  const size_t n1;
+  const size_t n2;
+  const size_t n3;
+  const size_t n4;
+  const size_t n5;
+  const size_t n6;
+  const size_t n7;
+
+  DynRankViewRemap(const ExecSpace& exec_space, const OutputView& arg_out,
+                   const InputView& arg_in)
+      : output(arg_out),
+        input(arg_in),
+        n0(std::min((size_t)arg_out.extent(0), (size_t)arg_in.extent(0))),
+        n1(std::min((size_t)arg_out.extent(1), (size_t)arg_in.extent(1))),
+        n2(std::min((size_t)arg_out.extent(2), (size_t)arg_in.extent(2))),
+        n3(std::min((size_t)arg_out.extent(3), (size_t)arg_in.extent(3))),
+        n4(std::min((size_t)arg_out.extent(4), (size_t)arg_in.extent(4))),
+        n5(std::min((size_t)arg_out.extent(5), (size_t)arg_in.extent(5))),
+        n6(std::min((size_t)arg_out.extent(6), (size_t)arg_in.extent(6))),
+        n7(std::min((size_t)arg_out.extent(7), (size_t)arg_in.extent(7))) {
+    using Policy = Kokkos::RangePolicy<ExecSpace>;
+
+    Kokkos::parallel_for("Kokkos::DynRankViewRemap", Policy(exec_space, 0, n0),
+                         *this);
+  }
+
+  DynRankViewRemap(const OutputView& arg_out, const InputView& arg_in)
+      : output(arg_out),
+        input(arg_in),
+        n0(std::min((size_t)arg_out.extent(0), (size_t)arg_in.extent(0))),
+        n1(std::min((size_t)arg_out.extent(1), (size_t)arg_in.extent(1))),
+        n2(std::min((size_t)arg_out.extent(2), (size_t)arg_in.extent(2))),
+        n3(std::min((size_t)arg_out.extent(3), (size_t)arg_in.extent(3))),
+        n4(std::min((size_t)arg_out.extent(4), (size_t)arg_in.extent(4))),
+        n5(std::min((size_t)arg_out.extent(5), (size_t)arg_in.extent(5))),
+        n6(std::min((size_t)arg_out.extent(6), (size_t)arg_in.extent(6))),
+        n7(std::min((size_t)arg_out.extent(7), (size_t)arg_in.extent(7))) {
+    using Policy = Kokkos::RangePolicy<ExecSpace>;
+
+    Kokkos::parallel_for("Kokkos::DynRankViewRemap", Policy(0, n0), *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const size_t i0) const {
+    for (size_t i1 = 0; i1 < n1; ++i1) {
+      for (size_t i2 = 0; i2 < n2; ++i2) {
+        for (size_t i3 = 0; i3 < n3; ++i3) {
+          for (size_t i4 = 0; i4 < n4; ++i4) {
+            for (size_t i5 = 0; i5 < n5; ++i5) {
+              for (size_t i6 = 0; i6 < n6; ++i6) {
+                output.access(i0, i1, i2, i3, i4, i5, i6) =
+                    input.access(i0, i1, i2, i3, i4, i5, i6);
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+namespace Kokkos {
+
+namespace Impl {
+
+/** \brief Returns a View of the requested rank, aliasing the
+   underlying memory, to facilitate implementation of deep_copy() and
+   other routines that are defined on View */
+template <unsigned N, typename T, typename... Args>
+KOKKOS_FUNCTION auto as_view_of_rank_n(DynRankView<T, Args...> v) {
+  if (v.rank() != N) {
+    KOKKOS_IF_ON_HOST(
+        const std::string message =
+            "Converting DynRankView of rank " + std::to_string(v.rank()) +
+            " to a View of mis-matched rank " + std::to_string(N) + "!";
+        Kokkos::abort(message.c_str());)
+    KOKKOS_IF_ON_DEVICE(
+        Kokkos::abort("Converting DynRankView to a View of mis-matched rank!");)
+  }
+
+  return View<typename RankDataType<T, N>::type, Args...>(
+      v.data(), v.impl_map().layout());
+}
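+
+// Sketch (illustrative): the result aliases the same allocation, e.g.
+//
+//   Kokkos::DynRankView<double> d("D", 2, 3);  // rank 2
+//   auto v = Impl::as_view_of_rank_n<2>(d);    // Kokkos::View<double**, ...>
+//   // v.data() == d.data()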
+
+template <typename Function, typename... Args>
+void apply_to_view_of_static_rank(Function&& f, DynRankView<Args...> a) {
+  switch (rank(a)) {
+    case 0: f(as_view_of_rank_n<0>(a)); break;
+    case 1: f(as_view_of_rank_n<1>(a)); break;
+    case 2: f(as_view_of_rank_n<2>(a)); break;
+    case 3: f(as_view_of_rank_n<3>(a)); break;
+    case 4: f(as_view_of_rank_n<4>(a)); break;
+    case 5: f(as_view_of_rank_n<5>(a)); break;
+    case 6: f(as_view_of_rank_n<6>(a)); break;
+    case 7: f(as_view_of_rank_n<7>(a)); break;
+    default:
+      KOKKOS_IF_ON_HOST(
+          Kokkos::abort(
+              std::string(
+                  "Trying to apply a function to a view of unexpected rank " +
+                  std::to_string(rank(a)))
+                  .c_str());)
+      KOKKOS_IF_ON_DEVICE(
+          Kokkos::abort(
+              "Trying to apply a function to a view of unexpected rank");)
+  }
+}
+
+}  // namespace Impl
+
+template <typename D, class... P>
+KOKKOS_INLINE_FUNCTION constexpr auto DynRankView<D, P...>::layout() const ->
+    typename traits::array_layout {
+  switch (rank()) {
+    case 0: return Impl::as_view_of_rank_n<0>(*this).layout();
+    case 1: return Impl::as_view_of_rank_n<1>(*this).layout();
+    case 2: return Impl::as_view_of_rank_n<2>(*this).layout();
+    case 3: return Impl::as_view_of_rank_n<3>(*this).layout();
+    case 4: return Impl::as_view_of_rank_n<4>(*this).layout();
+    case 5: return Impl::as_view_of_rank_n<5>(*this).layout();
+    case 6: return Impl::as_view_of_rank_n<6>(*this).layout();
+    case 7: return Impl::as_view_of_rank_n<7>(*this).layout();
+    default:
+      KOKKOS_IF_ON_HOST(
+          Kokkos::abort(
+              std::string(
+                  "Calling DynRankView::layout on DRV of unexpected rank " +
+                  std::to_string(rank()))
+                  .c_str());)
+      KOKKOS_IF_ON_DEVICE(
+          Kokkos::abort(
+              "Calling DynRankView::layout on DRV of unexpected rank");)
+  }
+  // control flow should never reach here
+  return m_map.layout();
+}
+
+/** \brief  Deep copy a value from Host memory into a view.  */
+template <class ExecSpace, class DT, class... DP>
+inline void deep_copy(
+    const ExecSpace& e, const DynRankView<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
+                                  void>::value>* = nullptr) {
+  static_assert(
+      std::is_same<typename ViewTraits<DT, DP...>::non_const_value_type,
+                   typename ViewTraits<DT, DP...>::value_type>::value,
+      "deep_copy requires non-const type");
+
+  Impl::apply_to_view_of_static_rank(
+      [=](auto view) { deep_copy(e, view, value); }, dst);
+}
+
+template <class DT, class... DP>
+inline void deep_copy(
+    const DynRankView<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
+                                  void>::value>* = nullptr) {
+  Impl::apply_to_view_of_static_rank([=](auto view) { deep_copy(view, value); },
+                                     dst);
+}
+
+/** \brief  Deep copy into a value in Host memory from a view.  */
+template <class ExecSpace, class ST, class... SP>
+inline void deep_copy(
+    const ExecSpace& e,
+    typename ViewTraits<ST, SP...>::non_const_value_type& dst,
+    const DynRankView<ST, SP...>& src,
+    std::enable_if_t<std::is_same<typename ViewTraits<ST, SP...>::specialize,
+                                  void>::value>* = 0) {
+  deep_copy(e, dst, Impl::as_view_of_rank_n<0>(src));
+}
+
+template <class ST, class... SP>
+inline void deep_copy(
+    typename ViewTraits<ST, SP...>::non_const_value_type& dst,
+    const DynRankView<ST, SP...>& src,
+    std::enable_if_t<std::is_same<typename ViewTraits<ST, SP...>::specialize,
+                                  void>::value>* = 0) {
+  deep_copy(dst, Impl::as_view_of_rank_n<0>(src));
+}
+
+//----------------------------------------------------------------------------
+/** \brief  A deep copy between views of the default specialization, with
+ * compatible type, the same rank, and the same contiguous layout.
+ *
+ * A rank mismatch aborts when attempting the conversion to a View.
+ */
+template <class ExecSpace, class DstType, class SrcType>
+inline void deep_copy(
+    const ExecSpace& exec_space, const DstType& dst, const SrcType& src,
+    std::enable_if_t<
+        (std::is_void<typename DstType::traits::specialize>::value &&
+         std::is_void<typename SrcType::traits::specialize>::value &&
+         (Kokkos::is_dyn_rank_view<DstType>::value ||
+          Kokkos::is_dyn_rank_view<SrcType>::value))>* = nullptr) {
+  static_assert(
+      std::is_same<typename DstType::traits::value_type,
+                   typename DstType::traits::non_const_value_type>::value,
+      "deep_copy requires non-const destination type");
+
+  switch (rank(dst)) {
+    case 0:
+      deep_copy(exec_space, Impl::as_view_of_rank_n<0>(dst),
+                Impl::as_view_of_rank_n<0>(src));
+      break;
+    case 1:
+      deep_copy(exec_space, Impl::as_view_of_rank_n<1>(dst),
+                Impl::as_view_of_rank_n<1>(src));
+      break;
+    case 2:
+      deep_copy(exec_space, Impl::as_view_of_rank_n<2>(dst),
+                Impl::as_view_of_rank_n<2>(src));
+      break;
+    case 3:
+      deep_copy(exec_space, Impl::as_view_of_rank_n<3>(dst),
+                Impl::as_view_of_rank_n<3>(src));
+      break;
+    case 4:
+      deep_copy(exec_space, Impl::as_view_of_rank_n<4>(dst),
+                Impl::as_view_of_rank_n<4>(src));
+      break;
+    case 5:
+      deep_copy(exec_space, Impl::as_view_of_rank_n<5>(dst),
+                Impl::as_view_of_rank_n<5>(src));
+      break;
+    case 6:
+      deep_copy(exec_space, Impl::as_view_of_rank_n<6>(dst),
+                Impl::as_view_of_rank_n<6>(src));
+      break;
+    case 7:
+      deep_copy(exec_space, Impl::as_view_of_rank_n<7>(dst),
+                Impl::as_view_of_rank_n<7>(src));
+      break;
+    default:
+      Kokkos::Impl::throw_runtime_exception(
+          "Calling DynRankView deep_copy with a view of unexpected rank " +
+          std::to_string(rank(dst)));
+  }
+}
+
+template <class DstType, class SrcType>
+inline void deep_copy(
+    const DstType& dst, const SrcType& src,
+    std::enable_if_t<
+        (std::is_void<typename DstType::traits::specialize>::value &&
+         std::is_void<typename SrcType::traits::specialize>::value &&
+         (Kokkos::is_dyn_rank_view<DstType>::value ||
+          Kokkos::is_dyn_rank_view<SrcType>::value))>* = nullptr) {
+  static_assert(
+      std::is_same<typename DstType::traits::value_type,
+                   typename DstType::traits::non_const_value_type>::value,
+      "deep_copy requires non-const destination type");
+
+  switch (rank(dst)) {
+    case 0:
+      deep_copy(Impl::as_view_of_rank_n<0>(dst),
+                Impl::as_view_of_rank_n<0>(src));
+      break;
+    case 1:
+      deep_copy(Impl::as_view_of_rank_n<1>(dst),
+                Impl::as_view_of_rank_n<1>(src));
+      break;
+    case 2:
+      deep_copy(Impl::as_view_of_rank_n<2>(dst),
+                Impl::as_view_of_rank_n<2>(src));
+      break;
+    case 3:
+      deep_copy(Impl::as_view_of_rank_n<3>(dst),
+                Impl::as_view_of_rank_n<3>(src));
+      break;
+    case 4:
+      deep_copy(Impl::as_view_of_rank_n<4>(dst),
+                Impl::as_view_of_rank_n<4>(src));
+      break;
+    case 5:
+      deep_copy(Impl::as_view_of_rank_n<5>(dst),
+                Impl::as_view_of_rank_n<5>(src));
+      break;
+    case 6:
+      deep_copy(Impl::as_view_of_rank_n<6>(dst),
+                Impl::as_view_of_rank_n<6>(src));
+      break;
+    case 7:
+      deep_copy(Impl::as_view_of_rank_n<7>(dst),
+                Impl::as_view_of_rank_n<7>(src));
+      break;
+    default:
+      Kokkos::Impl::throw_runtime_exception(
+          "Calling DynRankView deep_copy with a view of unexpected rank " +
+          std::to_string(rank(dst)));
+  }
+}
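+
+// Usage sketch for the rank dispatch above (illustrative extents): once the
+// runtime ranks agree, the copy forwards to the ordinary View deep_copy.
+//
+//   Kokkos::DynRankView<int> src("src", 4, 4), dst("dst", 4, 4);
+//   Kokkos::deep_copy(dst, src);  // dispatches to the rank-2 View overload
+//   // ranks above 7 throw; a dst/src rank mismatch aborts inside
+//   // as_view_of_rank_n when converting src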
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+// Deduce Mirror Types
+template <class Space, class T, class... P>
+struct MirrorDRViewType {
+  // The incoming view_type
+  using src_view_type = typename Kokkos::DynRankView<T, P...>;
+  // The memory space for the mirror view
+  using memory_space = typename Space::memory_space;
+  // Check whether it is the same memory space
+  enum {
+    is_same_memspace =
+        std::is_same<memory_space, typename src_view_type::memory_space>::value
+  };
+  // The array_layout
+  using array_layout = typename src_view_type::array_layout;
+  // The data type (we probably want it non-const since otherwise we can't
+  // even deep_copy to it.)
+  using data_type = typename src_view_type::non_const_data_type;
+  // The destination view type if it is not the same memory space
+  using dest_view_type = Kokkos::DynRankView<data_type, array_layout, Space>;
+  // If it is the same memory_space return the existing view_type
+  // This will also keep the unmanaged trait if necessary
+  using view_type =
+      std::conditional_t<is_same_memspace, src_view_type, dest_view_type>;
+};
+
+template <class Space, class T, class... P>
+struct MirrorDRVType {
+  // The incoming view_type
+  using src_view_type = typename Kokkos::DynRankView<T, P...>;
+  // The memory space for the mirror view
+  using memory_space = typename Space::memory_space;
+  // Check whether it is the same memory space
+  enum {
+    is_same_memspace =
+        std::is_same<memory_space, typename src_view_type::memory_space>::value
+  };
+  // The array_layout
+  using array_layout = typename src_view_type::array_layout;
+  // The data type (we probably want it non-const since otherwise we can't
+  // even deep_copy to it.)
+  using data_type = typename src_view_type::non_const_data_type;
+  // The destination view type if it is not the same memory space
+  using view_type = Kokkos::DynRankView<data_type, array_layout, Space>;
+};
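+
+// Note on the two traits above: MirrorDRViewType::view_type aliases the
+// source view type when the memory spaces match (so create_mirror_view can
+// return the source itself), whereas MirrorDRVType::view_type is always a
+// freshly allocated DynRankView in Space (create_mirror always allocates).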
+
+}  // namespace Impl
+
+namespace Impl {
+template <class T, class... P, class... ViewCtorArgs>
+inline typename DynRankView<T, P...>::HostMirror create_mirror(
+    const DynRankView<T, P...>& src,
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    std::enable_if_t<!Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>* =
+        nullptr) {
+  using src_type = DynRankView<T, P...>;
+  using dst_type = typename src_type::HostMirror;
+
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(
+      !alloc_prop_input::has_label,
+      "The view constructor arguments passed to Kokkos::create_mirror "
+      "must not include a label!");
+  static_assert(
+      !alloc_prop_input::has_pointer,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not include a pointer!");
+  static_assert(
+      !alloc_prop_input::allow_padding,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not explicitly allow padding!");
+
+  using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
+  alloc_prop prop_copy(arg_prop);
+  static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
+      std::string(src.label()).append("_mirror");
+
+  return dst_type(prop_copy, Impl::reconstructLayout(src.layout(), src.rank()));
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror(
+    const DynRankView<T, P...>& src,
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    std::enable_if_t<Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>* =
+        nullptr) {
+  using dst_type = typename Impl::MirrorDRVType<
+      typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+      P...>::view_type;
+
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(
+      !alloc_prop_input::has_label,
+      "The view constructor arguments passed to Kokkos::create_mirror "
+      "must not include a label!");
+  static_assert(
+      !alloc_prop_input::has_pointer,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not include a pointer!");
+  static_assert(
+      !alloc_prop_input::allow_padding,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not explicitly allow padding!");
+
+  using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
+  alloc_prop prop_copy(arg_prop);
+  static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
+      std::string(src.label()).append("_mirror");
+
+  return dst_type(prop_copy, Impl::reconstructLayout(src.layout(), src.rank()));
+}
+
+}  // namespace Impl
+
+// Create a mirror in host space
+template <class T, class... P>
+inline typename DynRankView<T, P...>::HostMirror create_mirror(
+    const DynRankView<T, P...>& src,
+    std::enable_if_t<std::is_same<typename ViewTraits<T, P...>::specialize,
+                                  void>::value>* = nullptr) {
+  return Impl::create_mirror(src, Kokkos::Impl::ViewCtorProp<>{});
+}
+
+template <class T, class... P>
+inline typename DynRankView<T, P...>::HostMirror create_mirror(
+    Kokkos::Impl::WithoutInitializing_t wi, const DynRankView<T, P...>& src,
+    std::enable_if_t<std::is_same<typename ViewTraits<T, P...>::specialize,
+                                  void>::value>* = nullptr) {
+  return Impl::create_mirror(src, Kokkos::view_alloc(wi));
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline typename DynRankView<T, P...>::HostMirror create_mirror(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    const DynRankView<T, P...>& src,
+    std::enable_if_t<
+        std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
+        !Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>* = nullptr) {
+  return Impl::create_mirror(src, arg_prop);
+}
+
+// Create a mirror in a new space
+template <class Space, class T, class... P,
+          typename Enable = std::enable_if_t<
+              Kokkos::is_space<Space>::value &&
+              std::is_void<typename ViewTraits<T, P...>::specialize>::value>>
+typename Impl::MirrorDRVType<Space, T, P...>::view_type create_mirror(
+    const Space&, const Kokkos::DynRankView<T, P...>& src) {
+  return Impl::create_mirror(
+      src, Kokkos::view_alloc(typename Space::memory_space{}));
+}
+
+template <class Space, class T, class... P>
+typename Impl::MirrorDRVType<Space, T, P...>::view_type create_mirror(
+    Kokkos::Impl::WithoutInitializing_t wi, const Space&,
+    const Kokkos::DynRankView<T, P...>& src,
+    std::enable_if_t<std::is_same<typename ViewTraits<T, P...>::specialize,
+                                  void>::value>* = nullptr) {
+  return Impl::create_mirror(
+      src, Kokkos::view_alloc(wi, typename Space::memory_space{}));
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    const DynRankView<T, P...>& src,
+    std::enable_if_t<
+        std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
+        Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>* = nullptr) {
+  using ReturnType = typename Impl::MirrorDRVType<
+      typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+      P...>::view_type;
+  return ReturnType{Impl::create_mirror(src, arg_prop)};
+}
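+
+// A minimal usage sketch of the create_mirror overload set above (space
+// names are examples and assume the matching backend is enabled):
+//
+//   Kokkos::DynRankView<double, Kokkos::CudaSpace> d("d", 100);
+//   auto h  = Kokkos::create_mirror(d);                       // HostMirror
+//   auto h2 = Kokkos::create_mirror(Kokkos::WithoutInitializing, d);
+//   auto m  = Kokkos::create_mirror(Kokkos::HostSpace{}, d);  // new space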
+
+namespace Impl {
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    std::is_same<
+        typename DynRankView<T, P...>::memory_space,
+        typename DynRankView<T, P...>::HostMirror::memory_space>::value &&
+        std::is_same<
+            typename DynRankView<T, P...>::data_type,
+            typename DynRankView<T, P...>::HostMirror::data_type>::value,
+    typename DynRankView<T, P...>::HostMirror>
+create_mirror_view(const DynRankView<T, P...>& src,
+                   const typename Impl::ViewCtorProp<ViewCtorArgs...>&) {
+  return src;
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    !(std::is_same<
+          typename DynRankView<T, P...>::memory_space,
+          typename DynRankView<T, P...>::HostMirror::memory_space>::value &&
+      std::is_same<
+          typename DynRankView<T, P...>::data_type,
+          typename DynRankView<T, P...>::HostMirror::data_type>::value),
+    typename DynRankView<T, P...>::HostMirror>
+create_mirror_view(
+    const DynRankView<T, P...>& src,
+    const typename Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  return Kokkos::Impl::create_mirror(src, arg_prop);
+}
+
+template <class Space, class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    Kokkos::is_space<Space>::value &&
+        Impl::MirrorDRViewType<Space, T, P...>::is_same_memspace,
+    typename Impl::MirrorDRViewType<Space, T, P...>::view_type>
+create_mirror_view(const Space&, const Kokkos::DynRankView<T, P...>& src,
+                   const typename Impl::ViewCtorProp<ViewCtorArgs...>&) {
+  return src;
+}
+
+template <class Space, class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    Kokkos::is_space<Space>::value &&
+        !Impl::MirrorDRViewType<Space, T, P...>::is_same_memspace,
+    typename Impl::MirrorDRViewType<Space, T, P...>::view_type>
+create_mirror_view(
+    const Space&, const Kokkos::DynRankView<T, P...>& src,
+    const typename Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  using MemorySpace = typename Space::memory_space;
+  using alloc_prop  = Impl::ViewCtorProp<ViewCtorArgs..., MemorySpace>;
+  alloc_prop prop_copy(arg_prop);
+
+  return Kokkos::Impl::create_mirror(src, prop_copy);
+}
+}  // namespace Impl
+
+// Create a mirror view in host space
+template <class T, class... P>
+inline std::enable_if_t<
+    (std::is_same<
+         typename DynRankView<T, P...>::memory_space,
+         typename DynRankView<T, P...>::HostMirror::memory_space>::value &&
+     std::is_same<typename DynRankView<T, P...>::data_type,
+                  typename DynRankView<T, P...>::HostMirror::data_type>::value),
+    typename DynRankView<T, P...>::HostMirror>
+create_mirror_view(const Kokkos::DynRankView<T, P...>& src) {
+  return src;
+}
+
+template <class T, class... P>
+inline std::enable_if_t<
+    !(std::is_same<
+          typename DynRankView<T, P...>::memory_space,
+          typename DynRankView<T, P...>::HostMirror::memory_space>::value &&
+      std::is_same<
+          typename DynRankView<T, P...>::data_type,
+          typename DynRankView<T, P...>::HostMirror::data_type>::value),
+    typename DynRankView<T, P...>::HostMirror>
+create_mirror_view(const Kokkos::DynRankView<T, P...>& src) {
+  return Kokkos::create_mirror(src);
+}
+
+template <class T, class... P>
+inline auto create_mirror_view(Kokkos::Impl::WithoutInitializing_t wi,
+                               const DynRankView<T, P...>& src) {
+  return Impl::create_mirror_view(src, Kokkos::view_alloc(wi));
+}
+
+// Create a mirror view in a new space
+// FIXME_C++17 Improve SFINAE here.
+template <class Space, class T, class... P,
+          class Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+inline typename Impl::MirrorDRViewType<Space, T, P...>::view_type
+create_mirror_view(
+    const Space&, const Kokkos::DynRankView<T, P...>& src,
+    std::enable_if_t<
+        Impl::MirrorDRViewType<Space, T, P...>::is_same_memspace>* = nullptr) {
+  return src;
+}
+
+// FIXME_C++17 Improve SFINAE here.
+template <class Space, class T, class... P,
+          class Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+inline typename Impl::MirrorDRViewType<Space, T, P...>::view_type
+create_mirror_view(
+    const Space& space, const Kokkos::DynRankView<T, P...>& src,
+    std::enable_if_t<
+        !Impl::MirrorDRViewType<Space, T, P...>::is_same_memspace>* = nullptr) {
+  return Kokkos::create_mirror(space, src);
+}
+
+template <class Space, class T, class... P>
+inline auto create_mirror_view(Kokkos::Impl::WithoutInitializing_t wi,
+                               const Space& space,
+                               const Kokkos::DynRankView<T, P...>& src) {
+  return Impl::create_mirror_view(space, src, Kokkos::view_alloc(wi));
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror_view(
+    const typename Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    const Kokkos::DynRankView<T, P...>& src) {
+  return Impl::create_mirror_view(src, arg_prop);
+}
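+
+// Usage note (illustrative): create_mirror_view differs from create_mirror
+// in that it returns the source itself when it is already accessible with
+// the same data type, avoiding an allocation, e.g.:
+//
+//   Kokkos::DynRankView<double, Kokkos::HostSpace> h("h", 10);
+//   auto hv = Kokkos::create_mirror_view(h);  // hv aliases h; no allocation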
+
+template <class... ViewCtorArgs, class T, class... P>
+auto create_mirror_view_and_copy(
+    const Impl::ViewCtorProp<ViewCtorArgs...>&,
+    const Kokkos::DynRankView<T, P...>& src,
+    std::enable_if_t<
+        std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
+        Impl::MirrorDRViewType<
+            typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+            P...>::is_same_memspace>* = nullptr) {
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+  static_assert(
+      alloc_prop_input::has_memory_space,
+      "The view constructor arguments passed to "
+      "Kokkos::create_mirror_view_and_copy must include a memory space!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to "
+                "Kokkos::create_mirror_view_and_copy must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::allow_padding,
+                "The view constructor arguments passed to "
+                "Kokkos::create_mirror_view_and_copy must "
+                "not explicitly allow padding!");
+
+  // same behavior as deep_copy(src, src)
+  if (!alloc_prop_input::has_execution_space)
+    fence(
+        "Kokkos::create_mirror_view_and_copy: fence before returning src view");
+  return src;
+}
+
+template <class... ViewCtorArgs, class T, class... P>
+auto create_mirror_view_and_copy(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    const Kokkos::DynRankView<T, P...>& src,
+    std::enable_if_t<
+        std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
+        !Impl::MirrorDRViewType<
+            typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+            P...>::is_same_memspace>* = nullptr) {
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+  static_assert(
+      alloc_prop_input::has_memory_space,
+      "The view constructor arguments passed to "
+      "Kokkos::create_mirror_view_and_copy must include a memory space!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to "
+                "Kokkos::create_mirror_view_and_copy must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::allow_padding,
+                "The view constructor arguments passed to "
+                "Kokkos::create_mirror_view_and_copy must "
+                "not explicitly allow padding!");
+  using Space  = typename alloc_prop_input::memory_space;
+  using Mirror = typename Impl::MirrorDRViewType<Space, T, P...>::view_type;
+
+  // Add some properties, if not provided, to avoid the need for if constexpr
+  using alloc_prop = Impl::ViewCtorProp<
+      ViewCtorArgs...,
+      std::conditional_t<alloc_prop_input::has_label,
+                         std::integral_constant<unsigned int, 12>, std::string>,
+      std::conditional_t<!alloc_prop_input::initialize,
+                         std::integral_constant<unsigned int, 13>,
+                         Impl::WithoutInitializing_t>,
+      std::conditional_t<alloc_prop_input::has_execution_space,
+                         std::integral_constant<unsigned int, 14>,
+                         typename Space::execution_space>>;
+  alloc_prop arg_prop_copy(arg_prop);
+
+  std::string& label =
+      static_cast<Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy).value;
+  if (label.empty()) label = src.label();
+  auto mirror = typename Mirror::non_const_type{
+      arg_prop_copy, Impl::reconstructLayout(src.layout(), src.rank())};
+  if (alloc_prop_input::has_execution_space) {
+    using ExecutionSpace = typename alloc_prop::execution_space;
+    deep_copy(
+        static_cast<Impl::ViewCtorProp<void, ExecutionSpace>&>(arg_prop_copy)
+            .value,
+        mirror, src);
+  } else
+    deep_copy(mirror, src);
+  return mirror;
+}
+
+template <class Space, class T, class... P>
+auto create_mirror_view_and_copy(const Space&,
+                                 const Kokkos::DynRankView<T, P...>& src,
+                                 std::string const& name = "") {
+  return create_mirror_view_and_copy(
+      Kokkos::view_alloc(typename Space::memory_space{}, name), src);
+}
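+
+// Illustrative use of create_mirror_view_and_copy (names are examples):
+//
+//   Kokkos::DynRankView<double> d("d", 8, 8);
+//   auto m = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, d);
+//   // same memory space: returns d after a fence (deep_copy(d, d)
+//   // semantics); different space: allocates a mirror and deep-copies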
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+/** \brief  Resize a view, copying the old data to the new allocation at the
+ * corresponding indices. */
+template <class... ViewCtorArgs, class T, class... P>
+inline void impl_resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+                        DynRankView<T, P...>& v, const size_t n0,
+                        const size_t n1, const size_t n2, const size_t n3,
+                        const size_t n4, const size_t n5, const size_t n6,
+                        const size_t n7) {
+  using drview_type      = DynRankView<T, P...>;
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(Kokkos::ViewTraits<T, P...>::is_managed,
+                "Can only resize managed views");
+  static_assert(!alloc_prop_input::has_label,
+                "The view constructor arguments passed to Kokkos::resize "
+                "must not include a label!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to Kokkos::resize must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::has_memory_space,
+                "The view constructor arguments passed to Kokkos::resize must "
+                "not include a memory space instance!");
+
+  // Add execution space here to avoid the need for if constexpr below
+  using alloc_prop = Impl::ViewCtorProp<
+      ViewCtorArgs..., std::string,
+      std::conditional_t<alloc_prop_input::has_execution_space,
+                         std::integral_constant<unsigned int, 10>,
+                         typename drview_type::execution_space>>;
+  alloc_prop prop_copy(arg_prop);
+  static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
+      v.label();
+
+  drview_type v_resized(prop_copy, n0, n1, n2, n3, n4, n5, n6, n7);
+
+  if (alloc_prop_input::has_execution_space)
+    Kokkos::Impl::DynRankViewRemap<drview_type, drview_type>(
+        static_cast<const Impl::ViewCtorProp<
+            void, typename alloc_prop::execution_space>&>(prop_copy)
+            .value,
+        v_resized, v);
+  else
+    Kokkos::Impl::DynRankViewRemap<drview_type, drview_type>(v_resized, v);
+
+  v = v_resized;
+}
+
+template <class T, class... P>
+inline void resize(DynRankView<T, P...>& v,
+                   const size_t n0 = KOKKOS_INVALID_INDEX,
+                   const size_t n1 = KOKKOS_INVALID_INDEX,
+                   const size_t n2 = KOKKOS_INVALID_INDEX,
+                   const size_t n3 = KOKKOS_INVALID_INDEX,
+                   const size_t n4 = KOKKOS_INVALID_INDEX,
+                   const size_t n5 = KOKKOS_INVALID_INDEX,
+                   const size_t n6 = KOKKOS_INVALID_INDEX,
+                   const size_t n7 = KOKKOS_INVALID_INDEX) {
+  impl_resize(Impl::ViewCtorProp<>{}, v, n0, n1, n2, n3, n4, n5, n6, n7);
+}
+
+template <class... ViewCtorArgs, class T, class... P>
+void resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+            DynRankView<T, P...>& v,
+            const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+  impl_resize(arg_prop, v, n0, n1, n2, n3, n4, n5, n6, n7);
+}
+
+template <class I, class T, class... P>
+inline std::enable_if_t<Impl::is_view_ctor_property<I>::value> resize(
+    const I& arg_prop, DynRankView<T, P...>& v,
+    const size_t n0 = KOKKOS_INVALID_INDEX,
+    const size_t n1 = KOKKOS_INVALID_INDEX,
+    const size_t n2 = KOKKOS_INVALID_INDEX,
+    const size_t n3 = KOKKOS_INVALID_INDEX,
+    const size_t n4 = KOKKOS_INVALID_INDEX,
+    const size_t n5 = KOKKOS_INVALID_INDEX,
+    const size_t n6 = KOKKOS_INVALID_INDEX,
+    const size_t n7 = KOKKOS_INVALID_INDEX) {
+  impl_resize(Kokkos::view_alloc(arg_prop), v, n0, n1, n2, n3, n4, n5, n6, n7);
+}
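+
+// Usage sketch for resize (illustrative extents): existing entries are
+// carried over through DynRankViewRemap above.
+//
+//   Kokkos::DynRankView<int> v("v", 10);   // rank-1, 10 entries
+//   Kokkos::resize(v, 20);                 // v(0..9) preserved
+//   Kokkos::resize(Kokkos::WithoutInitializing, v, 30);
+//   // old entries preserved, new tail entries left uninitialized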
+
+/** \brief  Reallocate a view without preserving its contents: the previous
+ * allocation is released and a fresh one with the requested extents is made. */
+template <class... ViewCtorArgs, class T, class... P>
+inline void impl_realloc(DynRankView<T, P...>& v, const size_t n0,
+                         const size_t n1, const size_t n2, const size_t n3,
+                         const size_t n4, const size_t n5, const size_t n6,
+                         const size_t n7,
+                         const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  using drview_type      = DynRankView<T, P...>;
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(Kokkos::ViewTraits<T, P...>::is_managed,
+                "Can only realloc managed views");
+  static_assert(!alloc_prop_input::has_label,
+                "The view constructor arguments passed to Kokkos::realloc must "
+                "not include a label!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to Kokkos::realloc must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::has_memory_space,
+                "The view constructor arguments passed to Kokkos::realloc must "
+                "not include a memory space instance!");
+
+  using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
+  alloc_prop arg_prop_copy(arg_prop);
+  static_cast<Kokkos::Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy)
+      .value = v.label();
+
+  v = drview_type();  // Deallocate first, if this is the only view to the allocation
+  v = drview_type(arg_prop_copy, n0, n1, n2, n3, n4, n5, n6, n7);
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline void realloc(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+                    DynRankView<T, P...>& v,
+                    const size_t n0 = KOKKOS_INVALID_INDEX,
+                    const size_t n1 = KOKKOS_INVALID_INDEX,
+                    const size_t n2 = KOKKOS_INVALID_INDEX,
+                    const size_t n3 = KOKKOS_INVALID_INDEX,
+                    const size_t n4 = KOKKOS_INVALID_INDEX,
+                    const size_t n5 = KOKKOS_INVALID_INDEX,
+                    const size_t n6 = KOKKOS_INVALID_INDEX,
+                    const size_t n7 = KOKKOS_INVALID_INDEX) {
+  impl_realloc(v, n0, n1, n2, n3, n4, n5, n6, n7, arg_prop);
+}
+
+template <class T, class... P>
+inline void realloc(DynRankView<T, P...>& v,
+                    const size_t n0 = KOKKOS_INVALID_INDEX,
+                    const size_t n1 = KOKKOS_INVALID_INDEX,
+                    const size_t n2 = KOKKOS_INVALID_INDEX,
+                    const size_t n3 = KOKKOS_INVALID_INDEX,
+                    const size_t n4 = KOKKOS_INVALID_INDEX,
+                    const size_t n5 = KOKKOS_INVALID_INDEX,
+                    const size_t n6 = KOKKOS_INVALID_INDEX,
+                    const size_t n7 = KOKKOS_INVALID_INDEX) {
+  impl_realloc(v, n0, n1, n2, n3, n4, n5, n6, n7, Impl::ViewCtorProp<>{});
+}
+
+template <class I, class T, class... P>
+inline std::enable_if_t<Impl::is_view_ctor_property<I>::value> realloc(
+    const I& arg_prop, DynRankView<T, P...>& v,
+    const size_t n0 = KOKKOS_INVALID_INDEX,
+    const size_t n1 = KOKKOS_INVALID_INDEX,
+    const size_t n2 = KOKKOS_INVALID_INDEX,
+    const size_t n3 = KOKKOS_INVALID_INDEX,
+    const size_t n4 = KOKKOS_INVALID_INDEX,
+    const size_t n5 = KOKKOS_INVALID_INDEX,
+    const size_t n6 = KOKKOS_INVALID_INDEX,
+    const size_t n7 = KOKKOS_INVALID_INDEX) {
+  impl_realloc(v, n0, n1, n2, n3, n4, n5, n6, n7, Kokkos::view_alloc(arg_prop));
+}
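+
+// Note: unlike resize, realloc above does not preserve contents; the old
+// allocation is dropped and a fresh one is created, e.g. (illustrative):
+//
+//   Kokkos::DynRankView<int> v("v", 10);
+//   Kokkos::realloc(v, 20);  // fresh allocation; old values are gone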
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DYNRANKVIEW
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DYNRANKVIEW
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/containers/src/Kokkos_DynamicView.hpp b/bundled/kokkos-3.7.00/containers/src/Kokkos_DynamicView.hpp
new file mode 100644 (file)
index 0000000..015a75c
--- /dev/null
+++ b/bundled/kokkos-3.7.00/containers/src/Kokkos_DynamicView.hpp
@@ -0,0 +1,1101 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_DYNAMIC_VIEW_HPP
+#define KOKKOS_DYNAMIC_VIEW_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DYNAMICVIEW
+#endif
+
+#include <cstdio>
+
+#include <Kokkos_Core.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+namespace Impl {
+
+/// Utility class to manage memory for chunked arrays on the host and
+/// device. It allocates/deallocates memory on both sides and provides
+/// utilities for creating mirrors and deep copying between them.
+template <typename MemorySpace, typename ValueType>
+struct ChunkedArrayManager {
+  using value_type   = ValueType;
+  using pointer_type = ValueType*;
+  using track_type   = Kokkos::Impl::SharedAllocationTracker;
+
+  ChunkedArrayManager()                           = default;
+  ChunkedArrayManager(ChunkedArrayManager const&) = default;
+  ChunkedArrayManager(ChunkedArrayManager&&)      = default;
+  ChunkedArrayManager& operator=(ChunkedArrayManager&&) = default;
+  ChunkedArrayManager& operator=(const ChunkedArrayManager&) = default;
+
+  template <typename Space, typename Value>
+  friend struct ChunkedArrayManager;
+
+  template <typename Space, typename Value>
+  inline ChunkedArrayManager(const ChunkedArrayManager<Space, Value>& rhs)
+      : m_valid(rhs.m_valid),
+        m_chunk_max(rhs.m_chunk_max),
+        m_chunks((ValueType**)(rhs.m_chunks)),
+        m_track(rhs.m_track),
+        m_chunk_size(rhs.m_chunk_size) {
+    static_assert(
+        Kokkos::Impl::MemorySpaceAccess<MemorySpace, Space>::assignable,
+        "Incompatible ChunkedArrayManager copy construction");
+  }
+
+  ChunkedArrayManager(const unsigned arg_chunk_max,
+                      const unsigned arg_chunk_size)
+      : m_chunk_max(arg_chunk_max), m_chunk_size(arg_chunk_size) {}
+
+ private:
+  struct ACCESSIBLE_TAG {};
+  struct INACCESSIBLE_TAG {};
+
+  ChunkedArrayManager(ACCESSIBLE_TAG, pointer_type* arg_chunks,
+                      const unsigned arg_chunk_max)
+      : m_valid(true), m_chunk_max(arg_chunk_max), m_chunks(arg_chunks) {}
+
+  ChunkedArrayManager(INACCESSIBLE_TAG, const unsigned arg_chunk_max,
+                      const unsigned arg_chunk_size)
+      : m_chunk_max(arg_chunk_max), m_chunk_size(arg_chunk_size) {}
+
+ public:
+  template <typename Space, typename Enable_ = void>
+  struct IsAccessibleFrom;
+
+  template <typename Space>
+  struct IsAccessibleFrom<
+      Space, typename std::enable_if_t<Kokkos::Impl::MemorySpaceAccess<
+                 MemorySpace, Space>::accessible>> : std::true_type {};
+
+  template <typename Space>
+  struct IsAccessibleFrom<
+      Space, typename std::enable_if_t<!Kokkos::Impl::MemorySpaceAccess<
+                 MemorySpace, Space>::accessible>> : std::false_type {};
+
+  template <typename Space>
+  static ChunkedArrayManager<Space, ValueType> create_mirror(
+      ChunkedArrayManager<MemorySpace, ValueType> const& other,
+      std::enable_if_t<IsAccessibleFrom<Space>::value>* = nullptr) {
+    return ChunkedArrayManager<Space, ValueType>{
+        ACCESSIBLE_TAG{}, other.m_chunks, other.m_chunk_max};
+  }
+
+  template <typename Space>
+  static ChunkedArrayManager<Space, ValueType> create_mirror(
+      ChunkedArrayManager<MemorySpace, ValueType> const& other,
+      std::enable_if_t<!IsAccessibleFrom<Space>::value>* = nullptr) {
+    using tag_type =
+        typename ChunkedArrayManager<Space, ValueType>::INACCESSIBLE_TAG;
+    return ChunkedArrayManager<Space, ValueType>{tag_type{}, other.m_chunk_max,
+                                                 other.m_chunk_size};
+  }
+
+ public:
+  void allocate_device(const std::string& label) {
+    if (m_chunks == nullptr) {
+      m_chunks = reinterpret_cast<pointer_type*>(MemorySpace().allocate(
+          label.c_str(), (sizeof(pointer_type) * (m_chunk_max + 2))));
+    }
+  }
+
+  void initialize() {
+    for (unsigned i = 0; i < m_chunk_max + 2; i++) {
+      m_chunks[i] = nullptr;
+    }
+    m_valid = true;
+  }
+
+ private:
+  /// Custom destroy functor for deallocating array chunks along with a linked
+  /// allocation
+  template <typename Space>
+  struct Destroy {
+    Destroy()               = default;
+    Destroy(Destroy&&)      = default;
+    Destroy(const Destroy&) = default;
+    Destroy& operator=(Destroy&&) = default;
+    Destroy& operator=(const Destroy&) = default;
+
+    Destroy(std::string label, value_type** arg_chunk,
+            const unsigned arg_chunk_max, const unsigned arg_chunk_size,
+            value_type** arg_linked)
+        : m_label(label),
+          m_chunks(arg_chunk),
+          m_linked(arg_linked),
+          m_chunk_max(arg_chunk_max),
+          m_chunk_size(arg_chunk_size) {}
+
+    void execute() {
+      // Destroy the array of chunk pointers.
+      // Two entries beyond the max chunks are allocation counters.
+      uintptr_t const len =
+          *reinterpret_cast<uintptr_t*>(m_chunks + m_chunk_max);
+      for (unsigned i = 0; i < len; i++) {
+        Space().deallocate(m_label.c_str(), m_chunks[i],
+                           sizeof(value_type) * m_chunk_size);
+      }
+      // Destroy the linked allocation if we have one.
+      if (m_linked != nullptr) {
+        Space().deallocate(m_label.c_str(), m_linked,
+                           (sizeof(value_type*) * (m_chunk_max + 2)));
+      }
+    }
+
+    void destroy_shared_allocation() { execute(); }
+
+    std::string m_label;
+    value_type** m_chunks = nullptr;
+    value_type** m_linked = nullptr;
+    unsigned m_chunk_max;
+    unsigned m_chunk_size;
+  };
+
+ public:
+  template <typename Space>
+  void allocate_with_destroy(const std::string& label,
+                             pointer_type* linked_allocation = nullptr) {
+    using destroy_type = Destroy<Space>;
+    using record_type =
+        Kokkos::Impl::SharedAllocationRecord<MemorySpace, destroy_type>;
+
+    // Allocate 2 extra slots so that *m_chunks[m_chunk_max] ==
+    // num_chunks_alloc and *m_chunks[m_chunk_max+1] == extent. This must
+    // match Destroy's execute(...) method.
+    record_type* const record = record_type::allocate(
+        MemorySpace(), label, (sizeof(pointer_type) * (m_chunk_max + 2)));
+    m_chunks = static_cast<pointer_type*>(record->data());
+    m_track.assign_allocated_record_to_uninitialized(record);
+
+    record->m_destroy = destroy_type(label, m_chunks, m_chunk_max, m_chunk_size,
+                                     linked_allocation);
+  }
+
+  pointer_type* get_ptr() const { return m_chunks; }
+
+  template <typename OtherMemorySpace, typename ExecutionSpace>
+  void deep_copy_to(
+      const ExecutionSpace& exec_space,
+      ChunkedArrayManager<OtherMemorySpace, ValueType> const& other) const {
+    if (other.m_chunks != m_chunks) {
+      Kokkos::Impl::DeepCopy<OtherMemorySpace, MemorySpace, ExecutionSpace>(
+          exec_space, other.m_chunks, m_chunks,
+          sizeof(pointer_type) * (m_chunk_max + 2));
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  pointer_type* operator+(int i) const { return m_chunks + i; }
+
+  KOKKOS_INLINE_FUNCTION
+  pointer_type& operator[](int i) const { return m_chunks[i]; }
+
+  track_type const& track() const { return m_track; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool valid() const { return m_valid; }
+
+ private:
+  bool m_valid           = false;
+  unsigned m_chunk_max   = 0;
+  pointer_type* m_chunks = nullptr;
+  track_type m_track;
+  unsigned m_chunk_size = 0;
+};
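+
+// Worked example of the chunk-pointer layout managed above, assuming
+// m_chunk_max == 4: the allocation holds (m_chunk_max + 2) pointers,
+//   [ptr0, ptr1, ptr2, ptr3, num_chunks_alloc, requested_extent]
+// with the two trailing slots reinterpreted as uintptr_t counters.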
+
+} /* end namespace Impl */
+
+/** \brief Dynamic views are restricted to rank one and have no layout.
+ *         Resizing may only occur on the host, outside of parallel regions.
+ *         Subviews are not allowed.
+ */
+template <typename DataType, typename... P>
+class DynamicView : public Kokkos::ViewTraits<DataType, P...> {
+ public:
+  using traits = Kokkos::ViewTraits<DataType, P...>;
+
+  using value_type   = typename traits::value_type;
+  using device_space = typename traits::memory_space;
+  using host_space =
+      typename Kokkos::Impl::HostMirror<device_space>::Space::memory_space;
+  using device_accessor = Impl::ChunkedArrayManager<device_space, value_type>;
+  using host_accessor   = Impl::ChunkedArrayManager<host_space, value_type>;
+
+ private:
+  template <class, class...>
+  friend class DynamicView;
+
+  using track_type = Kokkos::Impl::SharedAllocationTracker;
+
+  static_assert(traits::rank == 1 && traits::rank_dynamic == 1,
+                "DynamicView must be rank-one");
+
+  // It is assumed that the value_type is trivially copyable; if it is not,
+  // the raw chunk allocations and copies performed here are unsafe.
+  static_assert(std::is_void<typename traits::specialize>::value,
+                "DynamicView only implemented for non-specialized View type");
+
+ private:
+  device_accessor m_chunks;
+  host_accessor m_chunks_host;
+  unsigned m_chunk_shift;  // ceil(log2(m_chunk_size))
+  unsigned m_chunk_mask;   // m_chunk_size - 1
+  unsigned m_chunk_max;  // number of entries in the chunk array, each pointing
+                         // to a chunk of extent == m_chunk_size entries
+  unsigned m_chunk_size;  // 2 << (m_chunk_shift - 1)
+
+ public:
+  //----------------------------------------------------------------------
+
+  /** \brief  Compatible view of array of scalar types */
+  using array_type =
+      DynamicView<typename traits::data_type, typename traits::device_type>;
+
+  /** \brief  Compatible view of const data type */
+  using const_type = DynamicView<typename traits::const_data_type,
+                                 typename traits::device_type>;
+
+  /** \brief  Compatible view of non-const data type */
+  using non_const_type = DynamicView<typename traits::non_const_data_type,
+                                     typename traits::device_type>;
+
+  /** \brief  Must be accessible everywhere */
+  using HostMirror = DynamicView;
+
+  /** \brief Unified types */
+  using uniform_device =
+      Kokkos::Device<typename traits::device_type::execution_space,
+                     Kokkos::AnonymousSpace>;
+  using uniform_type               = array_type;
+  using uniform_const_type         = const_type;
+  using uniform_runtime_type       = array_type;
+  using uniform_runtime_const_type = const_type;
+  using uniform_nomemspace_type =
+      DynamicView<typename traits::data_type, uniform_device>;
+  using uniform_const_nomemspace_type =
+      DynamicView<typename traits::const_data_type, uniform_device>;
+  using uniform_runtime_nomemspace_type =
+      DynamicView<typename traits::data_type, uniform_device>;
+  using uniform_runtime_const_nomemspace_type =
+      DynamicView<typename traits::const_data_type, uniform_device>;
+
+  //----------------------------------------------------------------------
+
+  enum { Rank = 1 };
+
+  KOKKOS_INLINE_FUNCTION
+  size_t allocation_extent() const noexcept {
+    uintptr_t n =
+        *reinterpret_cast<const uintptr_t*>(m_chunks_host + m_chunk_max);
+    return (n << m_chunk_shift);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  size_t chunk_size() const noexcept { return m_chunk_size; }
+
+  KOKKOS_INLINE_FUNCTION
+  size_t chunk_max() const noexcept { return m_chunk_max; }
+
+  KOKKOS_INLINE_FUNCTION
+  size_t size() const noexcept {
+    size_t extent_0 =
+        *reinterpret_cast<const size_t*>(m_chunks_host + m_chunk_max + 1);
+    return extent_0;
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION size_t extent(const iType& r) const {
+    return r == 0 ? size() : 1;
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION size_t extent_int(const iType& r) const {
+    return r == 0 ? size() : 1;
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_0() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_1() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_2() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_3() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_4() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_5() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_6() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_7() const { return 0; }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+    *s = 0;
+  }
+
+  //----------------------------------------
+  // Allocation tracking properties
+
+  KOKKOS_INLINE_FUNCTION
+  int use_count() const { return m_chunks_host.track().use_count(); }
+
+  inline const std::string label() const {
+    return m_chunks_host.track().template get_label<host_space>();
+  }
+
+  //----------------------------------------------------------------------
+  // Range span is the span which contains all members.
+
+  using reference_type = typename traits::value_type&;
+  using pointer_type   = typename traits::value_type*;
+
+  enum {
+    reference_type_is_lvalue_reference =
+        std::is_lvalue_reference<reference_type>::value
+  };
+
+  KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
+    return false;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t span() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr pointer_type data() const { return 0; }
+
+  //----------------------------------------
+
+  template <typename I0, class... Args>
+  KOKKOS_INLINE_FUNCTION reference_type
+  operator()(const I0& i0, const Args&... /*args*/) const {
+    static_assert(Kokkos::Impl::are_integral<I0, Args...>::value,
+                  "Indices must be integral type");
+
+    Kokkos::Impl::runtime_check_memory_access_violation<
+        typename traits::memory_space>(
+        "Kokkos::DynamicView ERROR: attempt to access inaccessible memory "
+        "space");
+
+    // Which chunk is being indexed.
+    const uintptr_t ic = uintptr_t(i0) >> m_chunk_shift;
+
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+    const uintptr_t n = *reinterpret_cast<uintptr_t*>(m_chunks + m_chunk_max);
+    if (n <= ic) Kokkos::abort("Kokkos::DynamicView array bounds error");
+#endif
+
+    typename traits::value_type** const ch = m_chunks + ic;
+    return (*ch)[i0 & m_chunk_mask];
+  }
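+
+  // Worked example of the shift/mask arithmetic above, assuming
+  // m_chunk_shift == 10 (chunk size 1024):
+  //   i0 = 3000  ->  ic = 3000 >> 10 = 2   (third chunk)
+  //              ->  3000 & 1023   = 952   (offset within that chunk)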
+
+  //----------------------------------------
+  /** \brief  Resizing in serial can grow or shrink the array size
+   *          up to the maximum number of chunks
+   * */
+  template <typename IntType>
+  inline void resize_serial(IntType const& n) {
+    using local_value_type   = typename traits::value_type;
+    using value_pointer_type = local_value_type*;
+
+    const uintptr_t NC =
+        (n + m_chunk_mask) >>
+        m_chunk_shift;  // New total number of chunks needed for resize
+
+    if (m_chunk_max < NC) {
+      Kokkos::abort("DynamicView::resize_serial exceeded maximum size");
+    }
+
+    // *m_chunks[m_chunk_max] stores the current number of chunks being used
+    uintptr_t* const pc =
+        reinterpret_cast<uintptr_t*>(m_chunks_host + m_chunk_max);
+    std::string _label = m_chunks_host.track().template get_label<host_space>();
+
+    if (*pc < NC) {
+      while (*pc < NC) {
+        m_chunks_host[*pc] =
+            reinterpret_cast<value_pointer_type>(device_space().allocate(
+                _label.c_str(), sizeof(local_value_type) << m_chunk_shift));
+        ++*pc;
+      }
+    } else {
+      while (NC + 1 <= *pc) {
+        --*pc;
+        device_space().deallocate(_label.c_str(), m_chunks_host[*pc],
+                                  sizeof(local_value_type) << m_chunk_shift);
+        m_chunks_host[*pc] = nullptr;
+      }
+    }
+    // *m_chunks_host[m_chunk_max+1] stores the 'extent' requested by resize
+    *(pc + 1) = n;
+
+    typename device_space::execution_space exec{};
+    m_chunks_host.deep_copy_to(exec, m_chunks);
+    exec.fence(
+        "DynamicView::resize_serial: Fence after copying chunks to the device");
+  }
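+
+  // Usage sketch (illustrative sizes): resize_serial may only be called on
+  // the host, outside parallel regions, and only up to the max_extent given
+  // at construction, e.g.
+  //
+  //   Kokkos::Experimental::DynamicView<double*> dv("dv", 1024, 1 << 20);
+  //   dv.resize_serial(5000);  // grows to ceil(5000/1024) = 5 chunks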
+
+  KOKKOS_INLINE_FUNCTION bool is_allocated() const {
+    if (m_chunks_host.valid()) {
+      // *m_chunks_host[m_chunk_max] stores the current number of chunks being
+      // used
+      uintptr_t* const pc =
+          reinterpret_cast<uintptr_t*>(m_chunks_host + m_chunk_max);
+      return (*(pc + 1) > 0);
+    } else {
+      return false;
+    }
+  }
+
+  KOKKOS_FUNCTION const device_accessor& impl_get_chunks() const {
+    return m_chunks;
+  }
+
+  KOKKOS_FUNCTION device_accessor& impl_get_chunks() { return m_chunks; }
+
+  //----------------------------------------------------------------------
+
+  ~DynamicView()                  = default;
+  DynamicView()                   = default;
+  DynamicView(DynamicView&&)      = default;
+  DynamicView(const DynamicView&) = default;
+  DynamicView& operator=(DynamicView&&) = default;
+  DynamicView& operator=(const DynamicView&) = default;
+
+  template <class RT, class... RP>
+  DynamicView(const DynamicView<RT, RP...>& rhs)
+      : m_chunks(rhs.m_chunks),
+        m_chunks_host(rhs.m_chunks_host),
+        m_chunk_shift(rhs.m_chunk_shift),
+        m_chunk_mask(rhs.m_chunk_mask),
+        m_chunk_max(rhs.m_chunk_max),
+        m_chunk_size(rhs.m_chunk_size) {
+    using SrcTraits = typename DynamicView<RT, RP...>::traits;
+    using Mapping   = Kokkos::Impl::ViewMapping<traits, SrcTraits, void>;
+    static_assert(Mapping::is_assignable,
+                  "Incompatible DynamicView copy construction");
+  }
+
+  /**\brief  Allocation constructor
+   *
+   *  Memory is allocated in chunks.
+   *  A maximum size is required in order to allocate the
+   *  chunk-pointer array.
+   */
+  template <class... Prop>
+  DynamicView(const Kokkos::Impl::ViewCtorProp<Prop...>& arg_prop,
+              const unsigned min_chunk_size,
+              const unsigned max_extent)
+      :  // The chunk size is guaranteed to be a power of two
+        m_chunk_shift(Kokkos::Impl::integral_power_of_two_that_contains(
+            min_chunk_size))  // div ceil(log2(min_chunk_size))
+        ,
+        m_chunk_mask((1 << m_chunk_shift) - 1)  // mod
+        ,
+        m_chunk_max((max_extent + m_chunk_mask) >>
+                    m_chunk_shift)  // max num pointers-to-chunks in array
+        ,
+        m_chunk_size(2 << (m_chunk_shift - 1)) {
+    m_chunks = device_accessor(m_chunk_max, m_chunk_size);
+
+    const std::string& label =
+        static_cast<Kokkos::Impl::ViewCtorProp<void, std::string> const&>(
+            arg_prop)
+            .value;
+
+    if (device_accessor::template IsAccessibleFrom<host_space>::value) {
+      m_chunks.template allocate_with_destroy<device_space>(label);
+      m_chunks.initialize();
+      m_chunks_host =
+          device_accessor::template create_mirror<host_space>(m_chunks);
+    } else {
+      m_chunks.allocate_device(label);
+      m_chunks_host =
+          device_accessor::template create_mirror<host_space>(m_chunks);
+      m_chunks_host.template allocate_with_destroy<device_space>(
+          label, m_chunks.get_ptr());
+      m_chunks_host.initialize();
+
+      // Add some properties, if not provided, to avoid the need for if constexpr
+      using alloc_prop_input = Kokkos::Impl::ViewCtorProp<Prop...>;
+      using alloc_prop       = Kokkos::Impl::ViewCtorProp<
+          Prop..., std::conditional_t<alloc_prop_input::has_execution_space,
+                                      std::integral_constant<unsigned int, 15>,
+                                      typename device_space::execution_space>>;
+      alloc_prop arg_prop_copy(arg_prop);
+
+      const auto& exec = static_cast<const Kokkos::Impl::ViewCtorProp<
+          void, typename alloc_prop::execution_space>&>(arg_prop_copy)
+                             .value;
+      m_chunks_host.deep_copy_to(exec, m_chunks);
+      if (!alloc_prop_input::has_execution_space)
+        exec.fence(
+            "DynamicView::DynamicView(): Fence after copying chunks to the "
+            "device");
+    }
+  }
+
+  DynamicView(const std::string& arg_label, const unsigned min_chunk_size,
+              const unsigned max_extent)
+      : DynamicView(Kokkos::view_alloc(arg_label), min_chunk_size, max_extent) {
+  }
+};
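+
+// A minimal end-to-end sketch of DynamicView under the constraints
+// documented above (rank one, host-side resizing only); names and values
+// are illustrative:
+//
+//   using DV = Kokkos::Experimental::DynamicView<int*>;
+//   DV dv("dv", /*min_chunk_size=*/256, /*max_extent=*/10000);
+//   dv.resize_serial(1000);
+//   Kokkos::parallel_for(
+//       1000, KOKKOS_LAMBDA(int i) { dv(i) = i; });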
+
+}  // namespace Experimental
+
+template <class>
+struct is_dynamic_view : public std::false_type {};
+
+template <class D, class... P>
+struct is_dynamic_view<Kokkos::Experimental::DynamicView<D, P...>>
+    : public std::true_type {};
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+namespace Impl {
+
+// Deduce Mirror Types
+template <class Space, class T, class... P>
+struct MirrorDynamicViewType {
+  // The incoming view_type
+  using src_view_type = typename Kokkos::Experimental::DynamicView<T, P...>;
+  // The memory space for the mirror view
+  using memory_space = typename Space::memory_space;
+  // Check whether it is the same memory space
+  enum {
+    is_same_memspace =
+        std::is_same<memory_space, typename src_view_type::memory_space>::value
+  };
+  // The array_layout
+  using array_layout = typename src_view_type::array_layout;
+  // The data type (we probably want it non-const since otherwise we can't even
+  // deep_copy to it.)
+  using data_type = typename src_view_type::non_const_data_type;
+  // The destination view type if it is not the same memory space
+  using dest_view_type =
+      Kokkos::Experimental::DynamicView<data_type, array_layout, Space>;
+  // If it is the same memory_space return the existing view_type
+  // This will also keep the unmanaged trait if necessary
+  using view_type =
+      std::conditional_t<is_same_memspace, src_view_type, dest_view_type>;
+};
+}  // namespace Impl
+
+namespace Impl {
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror(
+    const Kokkos::Experimental::DynamicView<T, P...>& src,
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    std::enable_if_t<!Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>* =
+        nullptr) {
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(
+      !alloc_prop_input::has_label,
+      "The view constructor arguments passed to Kokkos::create_mirror "
+      "must not include a label!");
+  static_assert(
+      !alloc_prop_input::has_pointer,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not include a pointer!");
+  static_assert(
+      !alloc_prop_input::allow_padding,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not explicitly allow padding!");
+
+  using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
+  alloc_prop prop_copy(arg_prop);
+  static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
+      std::string(src.label()).append("_mirror");
+
+  auto ret = typename Kokkos::Experimental::DynamicView<T, P...>::HostMirror(
+      prop_copy, src.chunk_size(), src.chunk_max() * src.chunk_size());
+
+  ret.resize_serial(src.extent(0));
+
+  return ret;
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror(
+    const Kokkos::Experimental::DynamicView<T, P...>& src,
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    std::enable_if_t<Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>* =
+        nullptr) {
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(
+      !alloc_prop_input::has_label,
+      "The view constructor arguments passed to Kokkos::create_mirror "
+      "must not include a label!");
+  static_assert(
+      !alloc_prop_input::has_pointer,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not include a pointer!");
+  static_assert(
+      !alloc_prop_input::allow_padding,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not explicitly allow padding!");
+
+  using MemorySpace = typename alloc_prop_input::memory_space;
+  using alloc_prop  = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
+  alloc_prop prop_copy(arg_prop);
+  static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
+      std::string(src.label()).append("_mirror");
+
+  auto ret = typename Kokkos::Impl::MirrorDynamicViewType<
+      MemorySpace, T, P...>::view_type(prop_copy, src.chunk_size(),
+                                       src.chunk_max() * src.chunk_size());
+
+  ret.resize_serial(src.extent(0));
+
+  return ret;
+}
+}  // namespace Impl
+
+// Create a mirror in host space
+template <class T, class... P>
+inline auto create_mirror(
+    const Kokkos::Experimental::DynamicView<T, P...>& src) {
+  return Impl::create_mirror(src, Impl::ViewCtorProp<>{});
+}
+
+template <class T, class... P>
+inline auto create_mirror(
+    Kokkos::Impl::WithoutInitializing_t wi,
+    const Kokkos::Experimental::DynamicView<T, P...>& src) {
+  return Impl::create_mirror(src, Kokkos::view_alloc(wi));
+}
+
+// Create a mirror in a new space
+template <class Space, class T, class... P>
+inline auto create_mirror(
+    const Space&, const Kokkos::Experimental::DynamicView<T, P...>& src) {
+  return Impl::create_mirror(
+      src, Impl::ViewCtorProp<>{typename Space::memory_space{}});
+}
+
+template <class Space, class T, class... P>
+typename Kokkos::Impl::MirrorDynamicViewType<Space, T, P...>::view_type
+create_mirror(Kokkos::Impl::WithoutInitializing_t wi, const Space&,
+              const Kokkos::Experimental::DynamicView<T, P...>& src) {
+  return Impl::create_mirror(
+      src, Kokkos::view_alloc(wi, typename Space::memory_space{}));
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    const Kokkos::Experimental::DynamicView<T, P...>& src) {
+  return Impl::create_mirror(src, arg_prop);
+}
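+
+// Usage sketch (illustrative, not part of the upstream header): creating host
+// mirrors of a device-resident DynamicView via the overloads above. The names
+// `dyn` and `n` are hypothetical.
+//
+//   Kokkos::Experimental::DynamicView<double*> dyn("dyn",
+//                                                  /*min_chunk_size=*/1024,
+//                                                  /*max_extent=*/1 << 20);
+//   dyn.resize_serial(n);
+//   auto h_dyn = Kokkos::create_mirror(dyn);  // always a new host allocation
+//   auto h_raw = Kokkos::create_mirror(Kokkos::WithoutInitializing, dyn);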
+
+namespace Impl {
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    (std::is_same<
+         typename Kokkos::Experimental::DynamicView<T, P...>::memory_space,
+         typename Kokkos::Experimental::DynamicView<
+             T, P...>::HostMirror::memory_space>::value &&
+     std::is_same<
+         typename Kokkos::Experimental::DynamicView<T, P...>::data_type,
+         typename Kokkos::Experimental::DynamicView<
+             T, P...>::HostMirror::data_type>::value),
+    typename Kokkos::Experimental::DynamicView<T, P...>::HostMirror>
+create_mirror_view(
+    const typename Kokkos::Experimental::DynamicView<T, P...>& src,
+    const Impl::ViewCtorProp<ViewCtorArgs...>&) {
+  return src;
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    !(std::is_same<
+          typename Kokkos::Experimental::DynamicView<T, P...>::memory_space,
+          typename Kokkos::Experimental::DynamicView<
+              T, P...>::HostMirror::memory_space>::value &&
+      std::is_same<
+          typename Kokkos::Experimental::DynamicView<T, P...>::data_type,
+          typename Kokkos::Experimental::DynamicView<
+              T, P...>::HostMirror::data_type>::value),
+    typename Kokkos::Experimental::DynamicView<T, P...>::HostMirror>
+create_mirror_view(const Kokkos::Experimental::DynamicView<T, P...>& src,
+                   const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  return Kokkos::create_mirror(arg_prop, src);
+}
+
+template <class Space, class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    Impl::MirrorDynamicViewType<Space, T, P...>::is_same_memspace,
+    typename Kokkos::Impl::MirrorDynamicViewType<Space, T, P...>::view_type>
+create_mirror_view(const Space&,
+                   const Kokkos::Experimental::DynamicView<T, P...>& src,
+                   const Impl::ViewCtorProp<ViewCtorArgs...>&) {
+  return src;
+}
+}  // namespace Impl
+
+// Create a mirror view in host space
+template <class T, class... P>
+inline auto create_mirror_view(
+    const typename Kokkos::Experimental::DynamicView<T, P...>& src) {
+  return Impl::create_mirror_view(src, Impl::ViewCtorProp<>{});
+}
+
+template <class T, class... P>
+inline auto create_mirror_view(
+    Kokkos::Impl::WithoutInitializing_t wi,
+    const typename Kokkos::Experimental::DynamicView<T, P...>& src) {
+  return Impl::create_mirror_view(src, Kokkos::view_alloc(wi));
+}
+
+// Create a mirror in a new space
+template <class Space, class T, class... P>
+inline auto create_mirror_view(
+    const Space& space, const Kokkos::Experimental::DynamicView<T, P...>& src) {
+  return Impl::create_mirror_view(space, src, Impl::ViewCtorProp<>{});
+}
+
+template <class Space, class T, class... P>
+inline auto create_mirror_view(
+    Kokkos::Impl::WithoutInitializing_t wi, const Space&,
+    const Kokkos::Experimental::DynamicView<T, P...>& src) {
+  return Impl::create_mirror_view(
+      src, Kokkos::view_alloc(wi, typename Space::memory_space{}));
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror_view(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    const Kokkos::Experimental::DynamicView<T, P...>& src) {
+  return Impl::create_mirror_view(src, arg_prop);
+}
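+
+// Usage sketch (illustrative): unlike create_mirror, create_mirror_view
+// returns `src` itself when the memory space and data type already match its
+// HostMirror, so no allocation or copy happens in that case. `dyn` is the
+// hypothetical DynamicView from the sketch above.
+//
+//   auto m = Kokkos::create_mirror_view(dyn);
+//   // On a host-only build `m` aliases `dyn`; otherwise it is a fresh host
+//   // allocation that still has to be filled with deep_copy.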
+
+template <class T, class... DP, class... SP>
+inline void deep_copy(const Kokkos::Experimental::DynamicView<T, DP...>& dst,
+                      const Kokkos::Experimental::DynamicView<T, SP...>& src) {
+  using dst_type = Kokkos::Experimental::DynamicView<T, DP...>;
+  using src_type = Kokkos::Experimental::DynamicView<T, SP...>;
+
+  using dst_execution_space = typename ViewTraits<T, DP...>::execution_space;
+  using src_execution_space = typename ViewTraits<T, SP...>::execution_space;
+  using dst_memory_space    = typename ViewTraits<T, DP...>::memory_space;
+  using src_memory_space    = typename ViewTraits<T, SP...>::memory_space;
+
+  constexpr bool DstExecCanAccessSrc =
+      Kokkos::SpaceAccessibility<dst_execution_space,
+                                 src_memory_space>::accessible;
+  constexpr bool SrcExecCanAccessDst =
+      Kokkos::SpaceAccessibility<src_execution_space,
+                                 dst_memory_space>::accessible;
+
+  if (DstExecCanAccessSrc)
+    Kokkos::Impl::ViewRemap<dst_type, src_type, dst_execution_space>(dst, src);
+  else if (SrcExecCanAccessDst)
+    Kokkos::Impl::ViewRemap<dst_type, src_type, src_execution_space>(dst, src);
+  else
+    src.impl_get_chunks().deep_copy_to(dst_execution_space{},
+                                       dst.impl_get_chunks());
+  Kokkos::fence("Kokkos::deep_copy(DynamicView)");
+}
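+
+// Usage sketch (illustrative): deep_copy between two DynamicViews dispatches
+// to whichever execution space can access both allocations, falls back to a
+// chunk-by-chunk copy otherwise, and fences before returning. Names are
+// hypothetical.
+//
+//   Kokkos::Experimental::DynamicView<int*> a("a", 256, 1 << 16);
+//   Kokkos::Experimental::DynamicView<int*> b("b", 256, 1 << 16);
+//   a.resize_serial(1000);
+//   b.resize_serial(1000);
+//   Kokkos::deep_copy(a, b);  // blocking: fenced above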
+
+template <class ExecutionSpace, class T, class... DP, class... SP>
+inline void deep_copy(const ExecutionSpace& exec,
+                      const Kokkos::Experimental::DynamicView<T, DP...>& dst,
+                      const Kokkos::Experimental::DynamicView<T, SP...>& src) {
+  using dst_type = Kokkos::Experimental::DynamicView<T, DP...>;
+  using src_type = Kokkos::Experimental::DynamicView<T, SP...>;
+
+  using dst_execution_space = typename ViewTraits<T, DP...>::execution_space;
+  using src_execution_space = typename ViewTraits<T, SP...>::execution_space;
+  using dst_memory_space    = typename ViewTraits<T, DP...>::memory_space;
+  using src_memory_space    = typename ViewTraits<T, SP...>::memory_space;
+
+  constexpr bool DstExecCanAccessSrc =
+      Kokkos::SpaceAccessibility<dst_execution_space,
+                                 src_memory_space>::accessible;
+  constexpr bool SrcExecCanAccessDst =
+      Kokkos::SpaceAccessibility<src_execution_space,
+                                 dst_memory_space>::accessible;
+
+  // FIXME: use the provided execution space in the ViewRemap branches
+  if (DstExecCanAccessSrc)
+    Kokkos::Impl::ViewRemap<dst_type, src_type, dst_execution_space>(dst, src);
+  else if (SrcExecCanAccessDst)
+    Kokkos::Impl::ViewRemap<dst_type, src_type, src_execution_space>(dst, src);
+  else
+    src.impl_get_chunks().deep_copy_to(exec, dst.impl_get_chunks());
+}
+
+template <class T, class... DP, class... SP>
+inline void deep_copy(const View<T, DP...>& dst,
+                      const Kokkos::Experimental::DynamicView<T, SP...>& src) {
+  using dst_type = View<T, DP...>;
+  using src_type = Kokkos::Experimental::DynamicView<T, SP...>;
+
+  using dst_execution_space = typename ViewTraits<T, DP...>::execution_space;
+  using src_memory_space    = typename ViewTraits<T, SP...>::memory_space;
+
+  enum {
+    DstExecCanAccessSrc =
+        Kokkos::SpaceAccessibility<dst_execution_space,
+                                   src_memory_space>::accessible
+  };
+
+  if (DstExecCanAccessSrc) {
+    // Copying data between views in accessible memory spaces and either
+    // non-contiguous or incompatible shape.
+    Kokkos::Impl::ViewRemap<dst_type, src_type>(dst, src);
+    Kokkos::fence("Kokkos::deep_copy(DynamicView)");
+  } else {
+    Kokkos::Impl::throw_runtime_exception(
+        "deep_copy given views that would require a temporary allocation");
+  }
+}
+
+template <class T, class... DP, class... SP>
+inline void deep_copy(const Kokkos::Experimental::DynamicView<T, DP...>& dst,
+                      const View<T, SP...>& src) {
+  using dst_type = Kokkos::Experimental::DynamicView<T, DP...>;
+  using src_type = View<T, SP...>;
+
+  using dst_execution_space = typename ViewTraits<T, DP...>::execution_space;
+  using src_memory_space    = typename ViewTraits<T, SP...>::memory_space;
+
+  enum {
+    DstExecCanAccessSrc =
+        Kokkos::SpaceAccessibility<dst_execution_space,
+                                   src_memory_space>::accessible
+  };
+
+  if (DstExecCanAccessSrc) {
+    // Copying data between views in accessible memory spaces and either
+    // non-contiguous or incompatible shape.
+    Kokkos::Impl::ViewRemap<dst_type, src_type>(dst, src);
+    Kokkos::fence("Kokkos::deep_copy(DynamicView)");
+  } else {
+    Kokkos::Impl::throw_runtime_exception(
+        "deep_copy given views that would require a temporary allocation");
+  }
+}
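+
+// Usage sketch (illustrative): the mixed View/DynamicView overloads above
+// require that the destination's execution space can access the source
+// memory; otherwise they throw instead of allocating a temporary. Reusing
+// the hypothetical `h_dyn` and `n` from the sketches above:
+//
+//   Kokkos::View<double*, Kokkos::HostSpace> v("v", n);
+//   Kokkos::deep_copy(h_dyn, v);  // ok: both live in host-accessible memory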
+
+namespace Impl {
+template <class Arg0, class... DP, class... SP>
+struct CommonSubview<Kokkos::Experimental::DynamicView<DP...>,
+                     Kokkos::Experimental::DynamicView<SP...>, 1, Arg0> {
+  using DstType          = Kokkos::Experimental::DynamicView<DP...>;
+  using SrcType          = Kokkos::Experimental::DynamicView<SP...>;
+  using dst_subview_type = DstType;
+  using src_subview_type = SrcType;
+  dst_subview_type dst_sub;
+  src_subview_type src_sub;
+  CommonSubview(const DstType& dst, const SrcType& src, const Arg0& /*arg0*/)
+      : dst_sub(dst), src_sub(src) {}
+};
+
+template <class... DP, class SrcType, class Arg0>
+struct CommonSubview<Kokkos::Experimental::DynamicView<DP...>, SrcType, 1,
+                     Arg0> {
+  using DstType          = Kokkos::Experimental::DynamicView<DP...>;
+  using dst_subview_type = DstType;
+  using src_subview_type = typename Kokkos::Subview<SrcType, Arg0>;
+  dst_subview_type dst_sub;
+  src_subview_type src_sub;
+  CommonSubview(const DstType& dst, const SrcType& src, const Arg0& arg0)
+      : dst_sub(dst), src_sub(src, arg0) {}
+};
+
+template <class DstType, class... SP, class Arg0>
+struct CommonSubview<DstType, Kokkos::Experimental::DynamicView<SP...>, 1,
+                     Arg0> {
+  using SrcType          = Kokkos::Experimental::DynamicView<SP...>;
+  using dst_subview_type = typename Kokkos::Subview<DstType, Arg0>;
+  using src_subview_type = SrcType;
+  dst_subview_type dst_sub;
+  src_subview_type src_sub;
+  CommonSubview(const DstType& dst, const SrcType& src, const Arg0& arg0)
+      : dst_sub(dst, arg0), src_sub(src) {}
+};
+
+template <class... DP, class ViewTypeB, class Layout, class ExecSpace,
+          typename iType>
+struct ViewCopy<Kokkos::Experimental::DynamicView<DP...>, ViewTypeB, Layout,
+                ExecSpace, 1, iType> {
+  Kokkos::Experimental::DynamicView<DP...> a;
+  ViewTypeB b;
+
+  using policy_type = Kokkos::RangePolicy<ExecSpace, Kokkos::IndexType<iType>>;
+
+  ViewCopy(const Kokkos::Experimental::DynamicView<DP...>& a_,
+           const ViewTypeB& b_)
+      : a(a_), b(b_) {
+    Kokkos::parallel_for("Kokkos::ViewCopy-1D", policy_type(0, b.extent(0)),
+                         *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0) const { a(i0) = b(i0); }
+};
+
+template <class... DP, class... SP, class Layout, class ExecSpace,
+          typename iType>
+struct ViewCopy<Kokkos::Experimental::DynamicView<DP...>,
+                Kokkos::Experimental::DynamicView<SP...>, Layout, ExecSpace, 1,
+                iType> {
+  Kokkos::Experimental::DynamicView<DP...> a;
+  Kokkos::Experimental::DynamicView<SP...> b;
+
+  using policy_type = Kokkos::RangePolicy<ExecSpace, Kokkos::IndexType<iType>>;
+
+  ViewCopy(const Kokkos::Experimental::DynamicView<DP...>& a_,
+           const Kokkos::Experimental::DynamicView<SP...>& b_)
+      : a(a_), b(b_) {
+    const iType n = std::min(a.extent(0), b.extent(0));
+    Kokkos::parallel_for("Kokkos::ViewCopy-1D", policy_type(0, n), *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0) const { a(i0) = b(i0); }
+};
+
+}  // namespace Impl
+
+template <class... ViewCtorArgs, class T, class... P>
+auto create_mirror_view_and_copy(
+    const Impl::ViewCtorProp<ViewCtorArgs...>&,
+    const Kokkos::Experimental::DynamicView<T, P...>& src,
+    std::enable_if_t<
+        std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
+        Impl::MirrorDynamicViewType<
+            typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+            P...>::is_same_memspace>* = nullptr) {
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+  static_assert(
+      alloc_prop_input::has_memory_space,
+      "The view constructor arguments passed to "
+      "Kokkos::create_mirror_view_and_copy must include a memory space!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to "
+                "Kokkos::create_mirror_view_and_copy must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::allow_padding,
+                "The view constructor arguments passed to "
+                "Kokkos::create_mirror_view_and_copy must "
+                "not explicitly allow padding!");
+
+  // same behavior as deep_copy(src, src)
+  if (!alloc_prop_input::has_execution_space)
+    fence(
+        "Kokkos::create_mirror_view_and_copy: fence before returning src view");
+  return src;
+}
+
+template <class... ViewCtorArgs, class T, class... P>
+auto create_mirror_view_and_copy(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    const Kokkos::Experimental::DynamicView<T, P...>& src,
+    std::enable_if_t<
+        std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
+        !Impl::MirrorDynamicViewType<
+            typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+            P...>::is_same_memspace>* = nullptr) {
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+  static_assert(
+      alloc_prop_input::has_memory_space,
+      "The view constructor arguments passed to "
+      "Kokkos::create_mirror_view_and_copy must include a memory space!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to "
+                "Kokkos::create_mirror_view_and_copy must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::allow_padding,
+                "The view constructor arguments passed to "
+                "Kokkos::create_mirror_view_and_copy must "
+                "not explicitly allow padding!");
+  using Space = typename alloc_prop_input::memory_space;
+  using Mirror =
+      typename Impl::MirrorDynamicViewType<Space, T, P...>::view_type;
+
+  // Add label, initialization, and execution space properties if they were
+  // not provided, to avoid the need for `if constexpr`.
+  using alloc_prop = Impl::ViewCtorProp<
+      ViewCtorArgs...,
+      std::conditional_t<alloc_prop_input::has_label,
+                         std::integral_constant<unsigned int, 12>, std::string>,
+      std::conditional_t<!alloc_prop_input::initialize,
+                         std::integral_constant<unsigned int, 13>,
+                         Impl::WithoutInitializing_t>,
+      std::conditional_t<alloc_prop_input::has_execution_space,
+                         std::integral_constant<unsigned int, 14>,
+                         typename Space::execution_space>>;
+  alloc_prop arg_prop_copy(arg_prop);
+
+  std::string& label =
+      static_cast<Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy).value;
+  if (label.empty()) label = src.label();
+  auto mirror = typename Mirror::non_const_type(
+      arg_prop_copy, src.chunk_size(), src.chunk_max() * src.chunk_size());
+  mirror.resize_serial(src.extent(0));
+  if (alloc_prop_input::has_execution_space) {
+    using ExecutionSpace = typename alloc_prop::execution_space;
+    deep_copy(
+        static_cast<Impl::ViewCtorProp<void, ExecutionSpace>&>(arg_prop_copy)
+            .value,
+        mirror, src);
+  } else
+    deep_copy(mirror, src);
+  return mirror;
+}
+
+template <class Space, class T, class... P>
+auto create_mirror_view_and_copy(
+    const Space&, const Kokkos::Experimental::DynamicView<T, P...>& src,
+    std::string const& name = "") {
+  return create_mirror_view_and_copy(
+      Kokkos::view_alloc(typename Space::memory_space{}, name), src);
+}
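+
+// Usage sketch (illustrative): create_mirror_view_and_copy combines the two
+// steps; it returns `src` unchanged when the memory space already matches,
+// and otherwise allocates a mirror and deep-copies into it. `dyn` is the
+// hypothetical DynamicView from the sketches above.
+//
+//   auto h = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, dyn);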
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DYNAMICVIEW
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DYNAMICVIEW
+#endif
+#endif /* #ifndef KOKKOS_DYNAMIC_VIEW_HPP */
diff --git a/bundled/kokkos-3.7.00/containers/src/Kokkos_ErrorReporter.hpp b/bundled/kokkos-3.7.00/containers/src/Kokkos_ErrorReporter.hpp
new file mode 100644 (file)
index 0000000..8affa0b
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_EXPERIMENTAL_ERROR_REPORTER_HPP
+#define KOKKOS_EXPERIMENTAL_ERROR_REPORTER_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ERRORREPORTER
+#endif
+
+#include <vector>
+#include <Kokkos_Core.hpp>
+#include <Kokkos_View.hpp>
+#include <Kokkos_DualView.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+template <typename ReportType, typename DeviceType>
+class ErrorReporter {
+ public:
+  using report_type     = ReportType;
+  using device_type     = DeviceType;
+  using execution_space = typename device_type::execution_space;
+
+  ErrorReporter(int max_results)
+      : m_numReportsAttempted(""),
+        m_reports("", max_results),
+        m_reporters("", max_results) {
+    clear();
+  }
+
+  int getCapacity() const { return m_reports.h_view.extent(0); }
+
+  int getNumReports();
+
+  int getNumReportAttempts();
+
+  void getReports(std::vector<int> &reporters_out,
+                  std::vector<report_type> &reports_out);
+  void getReports(
+      typename Kokkos::View<int *,
+                            typename DeviceType::execution_space>::HostMirror
+          &reporters_out,
+      typename Kokkos::View<report_type *,
+                            typename DeviceType::execution_space>::HostMirror
+          &reports_out);
+
+  void clear();
+
+  void resize(const size_t new_size);
+
+  bool full() { return (getNumReportAttempts() >= getCapacity()); }
+
+  KOKKOS_INLINE_FUNCTION
+  bool add_report(int reporter_id, report_type report) const {
+    int idx = Kokkos::atomic_fetch_add(&m_numReportsAttempted(), 1);
+
+    if (idx >= 0 && (idx < static_cast<int>(m_reports.d_view.extent(0)))) {
+      m_reporters.d_view(idx) = reporter_id;
+      m_reports.d_view(idx)   = report;
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+ private:
+  using reports_view_t     = Kokkos::View<report_type *, device_type>;
+  using reports_dualview_t = Kokkos::DualView<report_type *, device_type>;
+
+  using host_mirror_space = typename reports_dualview_t::host_mirror_space;
+  Kokkos::View<int, device_type> m_numReportsAttempted;
+  reports_dualview_t m_reports;
+  Kokkos::DualView<int *, device_type> m_reporters;
+};
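+
+// Usage sketch (illustrative, not part of the upstream header): recording
+// errors from inside a kernel and retrieving them on the host. The view
+// `data` and the loop body are hypothetical.
+//
+//   using device_type = Kokkos::DefaultExecutionSpace::device_type;
+//   Kokkos::Experimental::ErrorReporter<double, device_type> reporter(100);
+//   Kokkos::parallel_for(
+//       "check", n, KOKKOS_LAMBDA(const int i) {
+//         if (data(i) < 0) reporter.add_report(i, data(i));
+//       });
+//   std::vector<int> reporters;
+//   std::vector<double> reports;
+//   reporter.getReports(reporters, reports);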
+
+template <typename ReportType, typename DeviceType>
+inline int ErrorReporter<ReportType, DeviceType>::getNumReports() {
+  int num_reports = 0;
+  Kokkos::deep_copy(num_reports, m_numReportsAttempted);
+  if (num_reports > static_cast<int>(m_reports.h_view.extent(0))) {
+    num_reports = m_reports.h_view.extent(0);
+  }
+  return num_reports;
+}
+
+template <typename ReportType, typename DeviceType>
+inline int ErrorReporter<ReportType, DeviceType>::getNumReportAttempts() {
+  int num_reports = 0;
+  Kokkos::deep_copy(num_reports, m_numReportsAttempted);
+  return num_reports;
+}
+
+template <typename ReportType, typename DeviceType>
+void ErrorReporter<ReportType, DeviceType>::getReports(
+    std::vector<int> &reporters_out, std::vector<report_type> &reports_out) {
+  int num_reports = getNumReports();
+  reporters_out.clear();
+  reporters_out.reserve(num_reports);
+  reports_out.clear();
+  reports_out.reserve(num_reports);
+
+  if (num_reports > 0) {
+    m_reports.template sync<host_mirror_space>();
+    m_reporters.template sync<host_mirror_space>();
+
+    for (int i = 0; i < num_reports; ++i) {
+      reporters_out.push_back(m_reporters.h_view(i));
+      reports_out.push_back(m_reports.h_view(i));
+    }
+  }
+}
+
+template <typename ReportType, typename DeviceType>
+void ErrorReporter<ReportType, DeviceType>::getReports(
+    typename Kokkos::View<
+        int *, typename DeviceType::execution_space>::HostMirror &reporters_out,
+    typename Kokkos::View<report_type *,
+                          typename DeviceType::execution_space>::HostMirror
+        &reports_out) {
+  int num_reports = getNumReports();
+  reporters_out   = typename Kokkos::View<int *, DeviceType>::HostMirror(
+      "ErrorReport::reporters_out", num_reports);
+  reports_out = typename Kokkos::View<report_type *, DeviceType>::HostMirror(
+      "ErrorReport::reports_out", num_reports);
+
+  if (num_reports > 0) {
+    m_reports.template sync<host_mirror_space>();
+    m_reporters.template sync<host_mirror_space>();
+
+    for (int i = 0; i < num_reports; ++i) {
+      reporters_out(i) = m_reporters.h_view(i);
+      reports_out(i)   = m_reports.h_view(i);
+    }
+  }
+}
+
+template <typename ReportType, typename DeviceType>
+void ErrorReporter<ReportType, DeviceType>::clear() {
+  int num_reports = 0;
+  Kokkos::deep_copy(m_numReportsAttempted, num_reports);
+  m_reports.template modify<execution_space>();
+  m_reporters.template modify<execution_space>();
+}
+
+template <typename ReportType, typename DeviceType>
+void ErrorReporter<ReportType, DeviceType>::resize(const size_t new_size) {
+  m_reports.resize(new_size);
+  m_reporters.resize(new_size);
+  typename DeviceType::execution_space().fence(
+      "Kokkos::Experimental::ErrorReporter::resize: fence after resizing");
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ERRORREPORTER
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ERRORREPORTER
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/containers/src/Kokkos_Functional.hpp b/bundled/kokkos-3.7.00/containers/src/Kokkos_Functional.hpp
new file mode 100644 (file)
index 0000000..478a087
--- /dev/null
@@ -0,0 +1,183 @@
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+
+#ifndef KOKKOS_FUNCTIONAL_HPP
+#define KOKKOS_FUNCTIONAL_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_FUNCTIONAL
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_Functional_impl.hpp>
+
+namespace Kokkos {
+
+// These should work for most types
+
+template <typename T>
+struct pod_hash {
+#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
+  using argument_type KOKKOS_DEPRECATED        = T;
+  using first_argument_type KOKKOS_DEPRECATED  = T;
+  using second_argument_type KOKKOS_DEPRECATED = uint32_t;
+  using result_type KOKKOS_DEPRECATED          = uint32_t;
+#endif
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  uint32_t operator()(T const& t) const {
+    return Impl::MurmurHash3_x86_32(&t, sizeof(T), 0);
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  uint32_t operator()(T const& t, uint32_t seed) const {
+    return Impl::MurmurHash3_x86_32(&t, sizeof(T), seed);
+  }
+};
+
+template <typename T>
+struct pod_equal_to {
+#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
+  using first_argument_type KOKKOS_DEPRECATED  = T;
+  using second_argument_type KOKKOS_DEPRECATED = T;
+  using result_type KOKKOS_DEPRECATED          = bool;
+#endif
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool operator()(T const& a, T const& b) const {
+    return Impl::bitwise_equal(&a, &b);
+  }
+};
+
+template <typename T>
+struct pod_not_equal_to {
+#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
+  using first_argument_type KOKKOS_DEPRECATED  = T;
+  using second_argument_type KOKKOS_DEPRECATED = T;
+  using result_type KOKKOS_DEPRECATED          = bool;
+#endif
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool operator()(T const& a, T const& b) const {
+    return !Impl::bitwise_equal(&a, &b);
+  }
+};
+
+template <typename T>
+struct equal_to {
+#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
+  using first_argument_type KOKKOS_DEPRECATED  = T;
+  using second_argument_type KOKKOS_DEPRECATED = T;
+  using result_type KOKKOS_DEPRECATED          = bool;
+#endif
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool operator()(T const& a, T const& b) const { return a == b; }
+};
+
+template <typename T>
+struct not_equal_to {
+#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
+  using first_argument_type KOKKOS_DEPRECATED  = T;
+  using second_argument_type KOKKOS_DEPRECATED = T;
+  using result_type KOKKOS_DEPRECATED          = bool;
+#endif
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool operator()(T const& a, T const& b) const { return a != b; }
+};
+
+template <typename T>
+struct greater {
+#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
+  using first_argument_type KOKKOS_DEPRECATED  = T;
+  using second_argument_type KOKKOS_DEPRECATED = T;
+  using result_type KOKKOS_DEPRECATED          = bool;
+#endif
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool operator()(T const& a, T const& b) const { return a > b; }
+};
+
+template <typename T>
+struct less {
+#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
+  using first_argument_type KOKKOS_DEPRECATED  = T;
+  using second_argument_type KOKKOS_DEPRECATED = T;
+  using result_type KOKKOS_DEPRECATED          = bool;
+#endif
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool operator()(T const& a, T const& b) const { return a < b; }
+};
+
+template <typename T>
+struct greater_equal {
+#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
+  using first_argument_type KOKKOS_DEPRECATED  = T;
+  using second_argument_type KOKKOS_DEPRECATED = T;
+  using result_type KOKKOS_DEPRECATED          = bool;
+#endif
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool operator()(T const& a, T const& b) const { return a >= b; }
+};
+
+template <typename T>
+struct less_equal {
+#if defined KOKKOS_ENABLE_DEPRECATED_CODE_3
+  using first_argument_type KOKKOS_DEPRECATED  = T;
+  using second_argument_type KOKKOS_DEPRECATED = T;
+  using result_type KOKKOS_DEPRECATED          = bool;
+#endif
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool operator()(T const& a, T const& b) const { return a <= b; }
+};
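+
+// Illustrative note (not part of the upstream header): pod_hash and
+// pod_equal_to serve as the default hasher and comparator for
+// Kokkos::UnorderedMap, and the relational functors mirror their std::
+// counterparts while remaining callable on device. A minimal sketch with a
+// hypothetical key type:
+//
+//   struct Key { int a; int b; };
+//   Kokkos::pod_hash<Key> hash;
+//   Kokkos::pod_equal_to<Key> eq;
+//   uint32_t h = hash(Key{1, 2});           // MurmurHash3 of the raw bytes
+//   bool same  = eq(Key{1, 2}, Key{1, 2});  // bitwise comparison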
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_FUNCTIONAL
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_FUNCTIONAL
+#endif
+#endif  // KOKKOS_FUNCTIONAL_HPP
diff --git a/bundled/kokkos-3.7.00/containers/src/Kokkos_OffsetView.hpp b/bundled/kokkos-3.7.00/containers/src/Kokkos_OffsetView.hpp
new file mode 100644 (file)
index 0000000..0b54d1b
--- /dev/null
@@ -0,0 +1,2100 @@
+/*
+ * Kokkos_OffsetView.hpp
+ *
+ *  Created on: Apr 23, 2018
+ *      Author: swbova
+ */
+
+#ifndef KOKKOS_OFFSETVIEW_HPP_
+#define KOKKOS_OFFSETVIEW_HPP_
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_OFFSETVIEW
+#endif
+
+#include <Kokkos_Core.hpp>
+
+#include <Kokkos_View.hpp>
+
+namespace Kokkos {
+
+namespace Experimental {
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class DataType, class... Properties>
+class OffsetView;
+
+template <class>
+struct is_offset_view : public std::false_type {};
+
+template <class D, class... P>
+struct is_offset_view<OffsetView<D, P...>> : public std::true_type {};
+
+template <class D, class... P>
+struct is_offset_view<const OffsetView<D, P...>> : public std::true_type {};
+
+#define KOKKOS_INVALID_OFFSET int64_t(0x7FFFFFFFFFFFFFFFLL)
+#define KOKKOS_INVALID_INDEX_RANGE \
+  { KOKKOS_INVALID_OFFSET, KOKKOS_INVALID_OFFSET }
+
+template <typename iType, std::enable_if_t<std::is_integral<iType>::value &&
+                                               std::is_signed<iType>::value,
+                                           iType> = 0>
+using IndexRange = Kokkos::Array<iType, 2>;
+
+using index_list_type = std::initializer_list<int64_t>;
+
+//  template <typename iType,
+//    std::enable_if_t< std::is_integral<iType>::value &&
+//      std::is_signed<iType>::value, iType > = 0> using min_index_type =
+//      std::initializer_list<iType>;
+
+namespace Impl {
+
+template <class ViewType>
+struct GetOffsetViewTypeFromViewType {
+  using type =
+      OffsetView<typename ViewType::data_type, typename ViewType::array_layout,
+                 typename ViewType::device_type,
+                 typename ViewType::memory_traits>;
+};
+
+template <unsigned, class MapType, class BeginsType>
+KOKKOS_INLINE_FUNCTION bool offsetview_verify_operator_bounds(
+    const MapType&, const BeginsType&) {
+  return true;
+}
+
+template <unsigned R, class MapType, class BeginsType, class iType,
+          class... Args>
+KOKKOS_INLINE_FUNCTION bool offsetview_verify_operator_bounds(
+    const MapType& map, const BeginsType& begins, const iType& i,
+    Args... args) {
+  const bool legalIndex =
+      (int64_t(i) >= begins[R]) &&
+      (int64_t(i) <= int64_t(begins[R] + map.extent(R) - 1));
+  return legalIndex &&
+         offsetview_verify_operator_bounds<R + 1>(map, begins, args...);
+}
+template <unsigned, class MapType, class BeginsType>
+inline void offsetview_error_operator_bounds(char*, int, const MapType&,
+                                             const BeginsType&) {}
+
+template <unsigned R, class MapType, class BeginsType, class iType,
+          class... Args>
+inline void offsetview_error_operator_bounds(char* buf, int len,
+                                             const MapType& map,
+                                             const BeginsType begins,
+                                             const iType& i, Args... args) {
+  const int64_t b = begins[R];
+  const int64_t e = b + map.extent(R) - 1;
+  const int n =
+      snprintf(buf, len, " %ld <= %ld <= %ld %c", static_cast<long>(b),
+               static_cast<long>(i), static_cast<long>(e),
+               (sizeof...(Args) ? ',' : ')'));
+  offsetview_error_operator_bounds<R + 1>(buf + n, len - n, map, begins,
+                                          args...);
+}
+
+template <class MemorySpace, class MapType, class BeginsType, class... Args>
+KOKKOS_INLINE_FUNCTION void offsetview_verify_operator_bounds(
+    Kokkos::Impl::SharedAllocationTracker const& tracker, const MapType& map,
+    const BeginsType& begins, Args... args) {
+  if (!offsetview_verify_operator_bounds<0>(map, begins, args...)) {
+    KOKKOS_IF_ON_HOST(
+        (enum {LEN = 1024}; char buffer[LEN];
+         const std::string label = tracker.template get_label<MemorySpace>();
+         int n                   = snprintf(buffer, LEN,
+                          "OffsetView bounds error of view labeled %s (",
+                          label.c_str());
+         offsetview_error_operator_bounds<0>(buffer + n, LEN - n, map, begins,
+                                             args...);
+         Kokkos::Impl::throw_runtime_exception(std::string(buffer));))
+
+    KOKKOS_IF_ON_DEVICE((
+        /* Check #1: is there a SharedAllocationRecord?
+          (we won't use it, but if it is not there then there isn't
+           a corresponding SharedAllocationHeader containing a label).
+          This check should cover the case of Views that don't
+          have the Unmanaged trait but were initialized by pointer. */
+        if (tracker.has_record()) {
+          Kokkos::Impl::operator_bounds_error_on_device(map);
+        } else { Kokkos::abort("OffsetView bounds error"); }))
+  }
+}
+
+inline void runtime_check_rank_host(const size_t rank_dynamic,
+                                    const size_t rank,
+                                    const index_list_type minIndices,
+                                    const std::string& label) {
+  bool isBad = false;
+  std::string message =
+      "Kokkos::Experimental::OffsetView ERROR: for OffsetView labeled '" +
+      label + "':";
+  if (rank_dynamic != rank) {
+    message +=
+        "The full rank must be the same as the dynamic rank. full rank = ";
+    message += std::to_string(rank) +
+               " dynamic rank = " + std::to_string(rank_dynamic) + "\n";
+    isBad = true;
+  }
+
+  size_t numOffsets = 0;
+  for (size_t i = 0; i < minIndices.size(); ++i) {
+    if (minIndices.begin()[i] != KOKKOS_INVALID_OFFSET) numOffsets++;
+  }
+  if (numOffsets != rank_dynamic) {
+    message += "The number of offsets provided ( " +
+               std::to_string(numOffsets) +
+               " ) must equal the dynamic rank ( " +
+               std::to_string(rank_dynamic) + " ).";
+    isBad = true;
+  }
+
+  if (isBad) Kokkos::abort(message.c_str());
+}
+
+KOKKOS_INLINE_FUNCTION
+void runtime_check_rank_device(const size_t rank_dynamic, const size_t rank,
+                               const index_list_type minIndices) {
+  if (rank_dynamic != rank) {
+    Kokkos::abort(
+        "The full rank of an OffsetView must be the same as the dynamic rank.");
+  }
+  size_t numOffsets = 0;
+  for (size_t i = 0; i < minIndices.size(); ++i) {
+    if (minIndices.begin()[i] != KOKKOS_INVALID_OFFSET) numOffsets++;
+  }
+  if (numOffsets != rank) {
+    Kokkos::abort(
+        "The number of offsets provided to an OffsetView constructor must "
+        "equal the dynamic rank.");
+  }
+}
+}  // namespace Impl
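+
+// Usage sketch (illustrative, based on the constructors defined below): an
+// OffsetView is built from inclusive {begin, end} index ranges and indexed
+// with the shifted coordinates. The name `v` is hypothetical.
+//
+//   Kokkos::Experimental::OffsetView<double**> v("v", {-1, 1}, {-2, 2});
+//   // v.extent(0) == 3, v.begin(0) == -1, v.end(0) == 2 (one past the last)
+//   v(-1, -2) = 42.0;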
+
+template <class DataType, class... Properties>
+class OffsetView : public ViewTraits<DataType, Properties...> {
+ public:
+  using traits = ViewTraits<DataType, Properties...>;
+
+ private:
+  template <class, class...>
+  friend class OffsetView;
+  template <class, class...>
+  friend class View;  // FIXME delete this line
+  template <class, class...>
+  friend class Kokkos::Impl::ViewMapping;
+
+  using map_type   = Kokkos::Impl::ViewMapping<traits, void>;
+  using track_type = Kokkos::Impl::SharedAllocationTracker;
+
+ public:
+  enum { Rank = map_type::Rank };
+  using begins_type = Kokkos::Array<int64_t, Rank>;
+
+  template <typename iType,
+            std::enable_if_t<std::is_integral<iType>::value, iType> = 0>
+  KOKKOS_INLINE_FUNCTION int64_t begin(const iType local_dimension) const {
+    return local_dimension < Rank ? m_begins[local_dimension]
+                                  : KOKKOS_INVALID_OFFSET;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  begins_type begins() const { return m_begins; }
+
+  template <typename iType,
+            std::enable_if_t<std::is_integral<iType>::value, iType> = 0>
+  KOKKOS_INLINE_FUNCTION int64_t end(const iType local_dimension) const {
+    return begin(local_dimension) + m_map.extent(local_dimension);
+  }
+
+ private:
+  track_type m_track;
+  map_type m_map;
+  begins_type m_begins;
+
+ public:
+  //----------------------------------------
+  /** \brief  Compatible view of array of scalar types */
+  using array_type =
+      OffsetView<typename traits::scalar_array_type,
+                 typename traits::array_layout, typename traits::device_type,
+                 typename traits::memory_traits>;
+
+  /** \brief  Compatible view of const data type */
+  using const_type =
+      OffsetView<typename traits::const_data_type,
+                 typename traits::array_layout, typename traits::device_type,
+                 typename traits::memory_traits>;
+
+  /** \brief  Compatible view of non-const data type */
+  using non_const_type =
+      OffsetView<typename traits::non_const_data_type,
+                 typename traits::array_layout, typename traits::device_type,
+                 typename traits::memory_traits>;
+
+  /** \brief  Compatible HostMirror view */
+  using HostMirror = OffsetView<typename traits::non_const_data_type,
+                                typename traits::array_layout,
+                                typename traits::host_mirror_space>;
+
+  //----------------------------------------
+  // Domain rank and extents
+
+  /** \brief rank() to be implemented
+   */
+  // KOKKOS_INLINE_FUNCTION
+  // static
+  // constexpr unsigned rank() { return map_type::Rank; }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
+      std::is_integral<iType>::value, size_t>
+  extent(const iType& r) const {
+    return m_map.extent(r);
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
+      std::is_integral<iType>::value, int>
+  extent_int(const iType& r) const {
+    return static_cast<int>(m_map.extent(r));
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr typename traits::array_layout layout()
+      const {
+    return m_map.layout();
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t size() const {
+    return m_map.dimension_0() * m_map.dimension_1() * m_map.dimension_2() *
+           m_map.dimension_3() * m_map.dimension_4() * m_map.dimension_5() *
+           m_map.dimension_6() * m_map.dimension_7();
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_0() const {
+    return m_map.stride_0();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_1() const {
+    return m_map.stride_1();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_2() const {
+    return m_map.stride_2();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_3() const {
+    return m_map.stride_3();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_4() const {
+    return m_map.stride_4();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_5() const {
+    return m_map.stride_5();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_6() const {
+    return m_map.stride_6();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_7() const {
+    return m_map.stride_7();
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
+      std::is_integral<iType>::value, size_t>
+  stride(iType r) const {
+    return (
+        r == 0
+            ? m_map.stride_0()
+            : (r == 1
+                   ? m_map.stride_1()
+                   : (r == 2
+                          ? m_map.stride_2()
+                          : (r == 3
+                                 ? m_map.stride_3()
+                                 : (r == 4
+                                        ? m_map.stride_4()
+                                        : (r == 5
+                                               ? m_map.stride_5()
+                                               : (r == 6
+                                                      ? m_map.stride_6()
+                                                      : m_map.stride_7())))))));
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+    m_map.stride(s);
+  }
+
+  //----------------------------------------
+  // Range span is the span which contains all members.
+
+  using reference_type = typename map_type::reference_type;
+  using pointer_type   = typename map_type::pointer_type;
+
+  enum {
+    reference_type_is_lvalue_reference =
+        std::is_lvalue_reference<reference_type>::value
+  };
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t span() const { return m_map.span(); }
+  KOKKOS_INLINE_FUNCTION bool span_is_contiguous() const {
+    return m_map.span_is_contiguous();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
+    return m_map.data() != nullptr;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr pointer_type data() const {
+    return m_map.data();
+  }
+
+  //----------------------------------------
+  // Allow specializations to query their specialized map
+
+  KOKKOS_INLINE_FUNCTION
+  const Kokkos::Impl::ViewMapping<traits, void>& implementation_map() const {
+    return m_map;
+  }
+
+  //----------------------------------------
+
+ private:
+  static constexpr bool is_layout_left =
+      std::is_same<typename traits::array_layout, Kokkos::LayoutLeft>::value;
+
+  static constexpr bool is_layout_right =
+      std::is_same<typename traits::array_layout, Kokkos::LayoutRight>::value;
+
+  static constexpr bool is_layout_stride =
+      std::is_same<typename traits::array_layout, Kokkos::LayoutStride>::value;
+
+  static constexpr bool is_default_map =
+      std::is_void<typename traits::specialize>::value &&
+      (is_layout_left || is_layout_right || is_layout_stride);
+
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+
+#define KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(ARG)                      \
+  Kokkos::Impl::runtime_check_memory_access_violation<                   \
+      typename traits::memory_space>(                                    \
+      "Kokkos::OffsetView ERROR: attempt to access inaccessible memory " \
+      "space");                                                          \
+  Kokkos::Experimental::Impl::offsetview_verify_operator_bounds<         \
+      typename traits::memory_space>                                     \
+      ARG;
+
+#else
+
+#define KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(ARG)                      \
+  Kokkos::Impl::runtime_check_memory_access_violation<                   \
+      typename traits::memory_space>(                                    \
+      "Kokkos::OffsetView ERROR: attempt to access inaccessible memory " \
+      "space");
+
+#endif
+ public:
+  //------------------------------
+  // Rank 0 operator()
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  reference_type operator()() const { return m_map.reference(); }
+  //------------------------------
+  // Rank 1 operator()
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::are_integral<I0>::value && (1 == Rank) && !is_default_map),
+      reference_type>
+  operator()(const I0& i0) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0))
+    const size_t j0 = i0 - m_begins[0];
+    return m_map.reference(j0);
+  }
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::are_integral<I0>::value && (1 == Rank) &&
+                        is_default_map && !is_layout_stride),
+                       reference_type>
+      operator()(const I0& i0) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0))
+    const size_t j0 = i0 - m_begins[0];
+    return m_map.m_impl_handle[j0];
+  }
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::are_integral<I0>::value && (1 == Rank) &&
+                        is_default_map && is_layout_stride),
+                       reference_type>
+      operator()(const I0& i0) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0))
+    const size_t j0 = i0 - m_begins[0];
+    return m_map.m_impl_handle[m_map.m_impl_offset.m_stride.S0 * j0];
+  }
+  //------------------------------
+  // Rank 1 operator[]
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::are_integral<I0>::value && (1 == Rank) && !is_default_map),
+      reference_type>
+  operator[](const I0& i0) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0))
+    const size_t j0 = i0 - m_begins[0];
+    return m_map.reference(j0);
+  }
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::are_integral<I0>::value && (1 == Rank) &&
+                        is_default_map && !is_layout_stride),
+                       reference_type>
+      operator[](const I0& i0) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0))
+    const size_t j0 = i0 - m_begins[0];
+    return m_map.m_impl_handle[j0];
+  }
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::are_integral<I0>::value && (1 == Rank) &&
+                        is_default_map && is_layout_stride),
+                       reference_type>
+      operator[](const I0& i0) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0))
+    const size_t j0 = i0 - m_begins[0];
+    return m_map.m_impl_handle[m_map.m_impl_offset.m_stride.S0 * j0];
+  }
+
+  //------------------------------
+  // Rank 2
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1>::value &&
+                        (2 == Rank) && !is_default_map),
+                       reference_type>
+      operator()(const I0& i0, const I1& i1) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0, i1))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    return m_map.reference(j0, j1);
+  }
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::are_integral<I0, I1>::value && (2 == Rank) &&
+       is_default_map && is_layout_left && (traits::rank_dynamic == 0)),
+      reference_type>
+  operator()(const I0& i0, const I1& i1) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0, i1))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    return m_map.m_impl_handle[j0 + m_map.m_impl_offset.m_dim.N0 * j1];
+  }
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::are_integral<I0, I1>::value && (2 == Rank) &&
+       is_default_map && is_layout_left && (traits::rank_dynamic != 0)),
+      reference_type>
+  operator()(const I0& i0, const I1& i1) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0, i1))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    return m_map.m_impl_handle[j0 + m_map.m_impl_offset.m_stride * j1];
+  }
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::are_integral<I0, I1>::value && (2 == Rank) &&
+       is_default_map && is_layout_right && (traits::rank_dynamic == 0)),
+      reference_type>
+  operator()(const I0& i0, const I1& i1) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0, i1))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    return m_map.m_impl_handle[j1 + m_map.m_impl_offset.m_dim.N1 * j0];
+  }
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::are_integral<I0, I1>::value && (2 == Rank) &&
+       is_default_map && is_layout_right && (traits::rank_dynamic != 0)),
+      reference_type>
+  operator()(const I0& i0, const I1& i1) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0, i1))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    return m_map.m_impl_handle[j1 + m_map.m_impl_offset.m_stride * j0];
+  }
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1>::value &&
+                        (2 == Rank) && is_default_map && is_layout_stride),
+                       reference_type>
+      operator()(const I0& i0, const I1& i1) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY((m_track, m_map, m_begins, i0, i1))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    return m_map.m_impl_handle[j0 * m_map.m_impl_offset.m_stride.S0 +
+                               j1 * m_map.m_impl_offset.m_stride.S1];
+  }
+
+  //------------------------------
+  // Rank 3
+
+  template <typename I0, typename I1, typename I2>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1, I2>::value &&
+                        (3 == Rank) && is_default_map),
+                       reference_type>
+      operator()(const I0& i0, const I1& i1, const I2& i2) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
+        (m_track, m_map, m_begins, i0, i1, i2))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    const size_t j2 = i2 - m_begins[2];
+    return m_map.m_impl_handle[m_map.m_impl_offset(j0, j1, j2)];
+  }
+
+  template <typename I0, typename I1, typename I2>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1, I2>::value &&
+                        (3 == Rank) && !is_default_map),
+                       reference_type>
+      operator()(const I0& i0, const I1& i1, const I2& i2) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
+        (m_track, m_map, m_begins, i0, i1, i2))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    const size_t j2 = i2 - m_begins[2];
+    return m_map.reference(j0, j1, j2);
+  }
+
+  //------------------------------
+  // Rank 4
+
+  template <typename I0, typename I1, typename I2, typename I3>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1, I2, I3>::value &&
+                        (4 == Rank) && is_default_map),
+                       reference_type>
+      operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
+        (m_track, m_map, m_begins, i0, i1, i2, i3))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    const size_t j2 = i2 - m_begins[2];
+    const size_t j3 = i3 - m_begins[3];
+    return m_map.m_impl_handle[m_map.m_impl_offset(j0, j1, j2, j3)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1, I2, I3>::value &&
+                        (4 == Rank) && !is_default_map),
+                       reference_type>
+      operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
+        (m_track, m_map, m_begins, i0, i1, i2, i3))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    const size_t j2 = i2 - m_begins[2];
+    const size_t j3 = i3 - m_begins[3];
+    return m_map.reference(j0, j1, j2, j3);
+  }
+
+  //------------------------------
+  // Rank 5
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1, I2, I3, I4>::value &&
+                        (5 == Rank) && is_default_map),
+                       reference_type>
+      operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
+                 const I4& i4) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
+        (m_track, m_map, m_begins, i0, i1, i2, i3, i4))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    const size_t j2 = i2 - m_begins[2];
+    const size_t j3 = i3 - m_begins[3];
+    const size_t j4 = i4 - m_begins[4];
+    return m_map.m_impl_handle[m_map.m_impl_offset(j0, j1, j2, j3, j4)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::are_integral<I0, I1, I2, I3, I4>::value &&
+                        (5 == Rank) && !is_default_map),
+                       reference_type>
+      operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
+                 const I4& i4) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
+        (m_track, m_map, m_begins, i0, i1, i2, i3, i4))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    const size_t j2 = i2 - m_begins[2];
+    const size_t j3 = i3 - m_begins[3];
+    const size_t j4 = i4 - m_begins[4];
+    return m_map.reference(j0, j1, j2, j3, j4);
+  }
+
+  //------------------------------
+  // Rank 6
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::are_integral<I0, I1, I2, I3, I4, I5>::value &&
+       (6 == Rank) && is_default_map),
+      reference_type>
+  operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
+             const I4& i4, const I5& i5) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
+        (m_track, m_map, m_begins, i0, i1, i2, i3, i4, i5))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    const size_t j2 = i2 - m_begins[2];
+    const size_t j3 = i3 - m_begins[3];
+    const size_t j4 = i4 - m_begins[4];
+    const size_t j5 = i5 - m_begins[5];
+    return m_map.m_impl_handle[m_map.m_impl_offset(j0, j1, j2, j3, j4, j5)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::are_integral<I0, I1, I2, I3, I4, I5>::value &&
+       (6 == Rank) && !is_default_map),
+      reference_type>
+  operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
+             const I4& i4, const I5& i5) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
+        (m_track, m_map, m_begins, i0, i1, i2, i3, i4, i5))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    const size_t j2 = i2 - m_begins[2];
+    const size_t j3 = i3 - m_begins[3];
+    const size_t j4 = i4 - m_begins[4];
+    const size_t j5 = i5 - m_begins[5];
+    return m_map.reference(j0, j1, j2, j3, j4, j5);
+  }
+
+  //------------------------------
+  // Rank 7
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::are_integral<I0, I1, I2, I3, I4, I5, I6>::value &&
+       (7 == Rank) && is_default_map),
+      reference_type>
+  operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
+             const I4& i4, const I5& i5, const I6& i6) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
+        (m_track, m_map, m_begins, i0, i1, i2, i3, i4, i5, i6))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    const size_t j2 = i2 - m_begins[2];
+    const size_t j3 = i3 - m_begins[3];
+    const size_t j4 = i4 - m_begins[4];
+    const size_t j5 = i5 - m_begins[5];
+    const size_t j6 = i6 - m_begins[6];
+    return m_map.m_impl_handle[m_map.m_impl_offset(j0, j1, j2, j3, j4, j5, j6)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::are_integral<I0, I1, I2, I3, I4, I5, I6>::value &&
+       (7 == Rank) && !is_default_map),
+      reference_type>
+  operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
+             const I4& i4, const I5& i5, const I6& i6) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
+        (m_track, m_map, m_begins, i0, i1, i2, i3, i4, i5, i6))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    const size_t j2 = i2 - m_begins[2];
+    const size_t j3 = i3 - m_begins[3];
+    const size_t j4 = i4 - m_begins[4];
+    const size_t j5 = i5 - m_begins[5];
+    const size_t j6 = i6 - m_begins[6];
+    return m_map.reference(j0, j1, j2, j3, j4, j5, j6);
+  }
+
+  //------------------------------
+  // Rank 8
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename I7>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::are_integral<I0, I1, I2, I3, I4, I5, I6, I7>::value &&
+       (8 == Rank) && is_default_map),
+      reference_type>
+  operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
+             const I4& i4, const I5& i5, const I6& i6, const I7& i7) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
+        (m_track, m_map, m_begins, i0, i1, i2, i3, i4, i5, i6, i7))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    const size_t j2 = i2 - m_begins[2];
+    const size_t j3 = i3 - m_begins[3];
+    const size_t j4 = i4 - m_begins[4];
+    const size_t j5 = i5 - m_begins[5];
+    const size_t j6 = i6 - m_begins[6];
+    const size_t j7 = i7 - m_begins[7];
+    return m_map
+        .m_impl_handle[m_map.m_impl_offset(j0, j1, j2, j3, j4, j5, j6, j7)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename I7>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::are_integral<I0, I1, I2, I3, I4, I5, I6, I7>::value &&
+       (8 == Rank) && !is_default_map),
+      reference_type>
+  operator()(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
+             const I4& i4, const I5& i5, const I6& i6, const I7& i7) const {
+    KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY(
+        (m_track, m_map, m_begins, i0, i1, i2, i3, i4, i5, i6, i7))
+    const size_t j0 = i0 - m_begins[0];
+    const size_t j1 = i1 - m_begins[1];
+    const size_t j2 = i2 - m_begins[2];
+    const size_t j3 = i3 - m_begins[3];
+    const size_t j4 = i4 - m_begins[4];
+    const size_t j5 = i5 - m_begins[5];
+    const size_t j6 = i6 - m_begins[6];
+    const size_t j7 = i7 - m_begins[7];
+    return m_map.reference(j0, j1, j2, j3, j4, j5, j6, j7);
+  }
+
+#undef KOKKOS_IMPL_OFFSETVIEW_OPERATOR_VERIFY
+
+  //----------------------------------------
+  // Standard destructor, constructors, and assignment operators
+
+  KOKKOS_DEFAULTED_FUNCTION
+  ~OffsetView() = default;
+
+  KOKKOS_INLINE_FUNCTION
+  OffsetView() : m_track(), m_map() {
+    for (size_t i = 0; i < Rank; ++i) m_begins[i] = KOKKOS_INVALID_OFFSET;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  OffsetView(const OffsetView& rhs)
+      : m_track(rhs.m_track, traits::is_managed),
+        m_map(rhs.m_map),
+        m_begins(rhs.m_begins) {}
+
+  KOKKOS_INLINE_FUNCTION
+  OffsetView(OffsetView&& rhs)
+      : m_track(std::move(rhs.m_track)),
+        m_map(std::move(rhs.m_map)),
+        m_begins(std::move(rhs.m_begins)) {}
+
+  KOKKOS_INLINE_FUNCTION
+  OffsetView& operator=(const OffsetView& rhs) {
+    m_track  = rhs.m_track;
+    m_map    = rhs.m_map;
+    m_begins = rhs.m_begins;
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  OffsetView& operator=(OffsetView&& rhs) {
+    m_track  = std::move(rhs.m_track);
+    m_map    = std::move(rhs.m_map);
+    m_begins = std::move(rhs.m_begins);
+    return *this;
+  }
+
+  // interoperability with View
+ private:
+  using view_type =
+      View<typename traits::scalar_array_type, typename traits::array_layout,
+           typename traits::device_type, typename traits::memory_traits>;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  view_type view() const {
+    view_type v(m_track, m_map);
+    return v;
+  }
+
+  template <class RT, class... RP>
+  KOKKOS_INLINE_FUNCTION OffsetView(const View<RT, RP...>& aview)
+      : m_track(aview.impl_track()), m_map() {
+    using SrcTraits = typename OffsetView<RT, RP...>::traits;
+    using Mapping   = Kokkos::Impl::ViewMapping<traits, SrcTraits, void>;
+    static_assert(Mapping::is_assignable,
+                  "Incompatible OffsetView copy construction");
+    Mapping::assign(m_map, aview.impl_map(), m_track);
+
+    for (int i = 0; i < aview.Rank; ++i) {
+      m_begins[i] = 0;
+    }
+  }
+
+  template <class RT, class... RP>
+  KOKKOS_INLINE_FUNCTION OffsetView(const View<RT, RP...>& aview,
+                                    const index_list_type& minIndices)
+      : m_track(aview.impl_track()), m_map() {
+    using SrcTraits = typename OffsetView<RT, RP...>::traits;
+    using Mapping   = Kokkos::Impl::ViewMapping<traits, SrcTraits, void>;
+    static_assert(Mapping::is_assignable,
+                  "Incompatible OffsetView copy construction");
+    Mapping::assign(m_map, aview.impl_map(), m_track);
+
+    KOKKOS_IF_ON_HOST((Kokkos::Experimental::Impl::runtime_check_rank_host(
+                           traits::rank_dynamic, Rank, minIndices, label());))
+
+    KOKKOS_IF_ON_DEVICE((Kokkos::Experimental::Impl::runtime_check_rank_device(
+                             traits::rank_dynamic, Rank, minIndices);))
+
+    for (size_t i = 0; i < minIndices.size(); ++i) {
+      m_begins[i] = minIndices.begin()[i];
+    }
+  }
+  template <class RT, class... RP>
+  KOKKOS_INLINE_FUNCTION OffsetView(const View<RT, RP...>& aview,
+                                    const begins_type& beg)
+      : m_track(aview.impl_track()), m_map(), m_begins(beg) {
+    using SrcTraits = typename OffsetView<RT, RP...>::traits;
+    using Mapping   = Kokkos::Impl::ViewMapping<traits, SrcTraits, void>;
+    static_assert(Mapping::is_assignable,
+                  "Incompatible OffsetView copy construction");
+    Mapping::assign(m_map, aview.impl_map(), m_track);
+  }
+
+  // may assign unmanaged from managed.
+
+  template <class RT, class... RP>
+  KOKKOS_INLINE_FUNCTION OffsetView(const OffsetView<RT, RP...>& rhs)
+      : m_track(rhs.m_track, traits::is_managed),
+        m_map(),
+        m_begins(rhs.m_begins) {
+    using SrcTraits = typename OffsetView<RT, RP...>::traits;
+    using Mapping   = Kokkos::Impl::ViewMapping<traits, SrcTraits, void>;
+    static_assert(Mapping::is_assignable,
+                  "Incompatible OffsetView copy construction");
+    Mapping::assign(m_map, rhs.m_map, rhs.m_track);  // swb what about assign?
+  }
+
+ private:
+  enum class subtraction_failure {
+    none,
+    negative,
+    overflow,
+  };
+
+  // Subtraction should return a non-negative number and not overflow
+  KOKKOS_INLINE_FUNCTION static subtraction_failure check_subtraction(
+      int64_t lhs, int64_t rhs) {
+    if (lhs < rhs) return subtraction_failure::negative;
+
+    if (static_cast<uint64_t>(-1) / static_cast<uint64_t>(2) <
+        static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs))
+      return subtraction_failure::overflow;
+
+    return subtraction_failure::none;
+  }
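+
+  // A small worked sketch of the guard above: the unsigned difference
+  // lhs - rhs must fit in a signed 64-bit extent, i.e. stay at or below
+  // UINT64_MAX / 2 == INT64_MAX. For instance, check_subtraction(3, -2)
+  // yields none (extent 5), while check_subtraction(INT64_MAX, -1)
+  // yields overflow.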
+
+  // Need a way to get at an element from both begins_type (aka Kokkos::Array
+  // which doesn't have iterators) and index_list_type (aka
+  // std::initializer_list which doesn't have .data() or operator[]).
+  // Returns by value
+  KOKKOS_INLINE_FUNCTION
+  static int64_t at(const begins_type& a, size_t pos) { return a[pos]; }
+
+  KOKKOS_INLINE_FUNCTION
+  static int64_t at(index_list_type a, size_t pos) {
+    return *(a.begin() + pos);
+  }
+
+  // Check that begins < ends for all elements
+  // B, E can be begins_type and/or index_list_type
+  template <typename B, typename E>
+  static subtraction_failure runtime_check_begins_ends_host(const B& begins,
+                                                            const E& ends) {
+    std::string message;
+    if (begins.size() != Rank)
+      message +=
+          "begins.size() "
+          "(" +
+          std::to_string(begins.size()) +
+          ")"
+          " != Rank "
+          "(" +
+          std::to_string(Rank) +
+          ")"
+          "\n";
+
+    if (ends.size() != Rank)
+      message +=
+          "ends.size() "
+          "(" +
+          std::to_string(ends.size()) +
+          ")"
+          " != Rank "
+          "(" +
+          std::to_string(Rank) +
+          ")"
+          "\n";
+
+    // If there are no errors so far, then rank == Rank
+    // Otherwise, check as much as possible
+    size_t rank = begins.size() < ends.size() ? begins.size() : ends.size();
+    for (size_t i = 0; i != rank; ++i) {
+      subtraction_failure sf = check_subtraction(at(ends, i), at(begins, i));
+      if (sf != subtraction_failure::none) {
+        message +=
+            "("
+            "ends[" +
+            std::to_string(i) +
+            "]"
+            " "
+            "(" +
+            std::to_string(at(ends, i)) +
+            ")"
+            " - "
+            "begins[" +
+            std::to_string(i) +
+            "]"
+            " "
+            "(" +
+            std::to_string(at(begins, i)) +
+            ")"
+            ")";
+        switch (sf) {
+          case subtraction_failure::negative:
+            message += " must be non-negative\n";
+            break;
+          case subtraction_failure::overflow: message += " overflows\n"; break;
+          default: break;
+        }
+      }
+    }
+
+    if (!message.empty()) {
+      message =
+          "Kokkos::Experimental::OffsetView ERROR: for unmanaged OffsetView\n" +
+          message;
+      Kokkos::Impl::throw_runtime_exception(message);
+    }
+
+    return subtraction_failure::none;
+  }
+
+  // Check that begins < ends for all elements
+  template <typename B, typename E>
+  KOKKOS_INLINE_FUNCTION static subtraction_failure
+  runtime_check_begins_ends_device(const B& begins, const E& ends) {
+    if (begins.size() != Rank)
+      Kokkos::abort(
+          "Kokkos::Experimental::OffsetView ERROR: for unmanaged "
+          "OffsetView: begins has bad Rank");
+    if (ends.size() != Rank)
+      Kokkos::abort(
+          "Kokkos::Experimental::OffsetView ERROR: for unmanaged "
+          "OffsetView: ends has bad Rank");
+
+    for (size_t i = 0; i != begins.size(); ++i) {
+      switch (check_subtraction(at(ends, i), at(begins, i))) {
+        case subtraction_failure::negative:
+          Kokkos::abort(
+              "Kokkos::Experimental::OffsetView ERROR: for unmanaged "
+              "OffsetView: bad range");
+          break;
+        case subtraction_failure::overflow:
+          Kokkos::abort(
+              "Kokkos::Experimental::OffsetView ERROR: for unmanaged "
+              "OffsetView: range overflows");
+          break;
+        default: break;
+      }
+    }
+
+    return subtraction_failure::none;
+  }
+
+  template <typename B, typename E>
+  KOKKOS_INLINE_FUNCTION static subtraction_failure runtime_check_begins_ends(
+      const B& begins, const E& ends) {
+    KOKKOS_IF_ON_HOST((return runtime_check_begins_ends_host(begins, ends);))
+    KOKKOS_IF_ON_DEVICE(
+        (return runtime_check_begins_ends_device(begins, ends);))
+  }
+
+  // Constructor around unmanaged data after checking begins < ends for all
+  // elements
+  // Each of B, E can be begins_type and/or index_list_type
+  // Precondition: begins.size() == ends.size() == m_begins.size() == Rank
+  template <typename B, typename E>
+  KOKKOS_INLINE_FUNCTION OffsetView(const pointer_type& p, const B& begins_,
+                                    const E& ends_,
+                                    subtraction_failure)
+      : m_track(),  // no tracking
+        m_map(Kokkos::Impl::ViewCtorProp<pointer_type>(p),
+              typename traits::array_layout(
+                  Rank > 0 ? at(ends_, 0) - at(begins_, 0) : 0,
+                  Rank > 1 ? at(ends_, 1) - at(begins_, 1) : 0,
+                  Rank > 2 ? at(ends_, 2) - at(begins_, 2) : 0,
+                  Rank > 3 ? at(ends_, 3) - at(begins_, 3) : 0,
+                  Rank > 4 ? at(ends_, 4) - at(begins_, 4) : 0,
+                  Rank > 5 ? at(ends_, 5) - at(begins_, 5) : 0,
+                  Rank > 6 ? at(ends_, 6) - at(begins_, 6) : 0,
+                  Rank > 7 ? at(ends_, 7) - at(begins_, 7) : 0)) {
+    for (size_t i = 0; i != m_begins.size(); ++i) {
+      m_begins[i] = at(begins_, i);
+    }
+  }
+
+ public:
+  // Constructor around unmanaged data
+  // Four overloads, as both begins and ends can be either
+  // begins_type or index_list_type
+  KOKKOS_INLINE_FUNCTION
+  OffsetView(const pointer_type& p, const begins_type& begins_,
+             const begins_type& ends_)
+      : OffsetView(p, begins_, ends_,
+                   runtime_check_begins_ends(begins_, ends_)) {}
+
+  KOKKOS_INLINE_FUNCTION
+  OffsetView(const pointer_type& p, const begins_type& begins_,
+             index_list_type ends_)
+      : OffsetView(p, begins_, ends_,
+                   runtime_check_begins_ends(begins_, ends_)) {}
+
+  KOKKOS_INLINE_FUNCTION
+  OffsetView(const pointer_type& p, index_list_type begins_,
+             const begins_type& ends_)
+      : OffsetView(p, begins_, ends_,
+                   runtime_check_begins_ends(begins_, ends_)) {}
+
+  KOKKOS_INLINE_FUNCTION
+  OffsetView(const pointer_type& p, index_list_type begins_,
+             index_list_type ends_)
+      : OffsetView(p, begins_, ends_,
+                   runtime_check_begins_ends(begins_, ends_)) {}
+
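+  // A minimal sketch of wrapping unmanaged memory with the four overloads
+  // above (hypothetical host-side usage); note that here ends_ is
+  // exclusive, the extent being ends[i] - begins[i]:
+  //
+  //   double buffer[20];
+  //   Kokkos::Experimental::OffsetView<double*, Kokkos::HostSpace> u(
+  //       buffer, {-10}, {10});  // valid indices -10 .. 9, extent 20
+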
+  //----------------------------------------
+  // Allocation tracking properties
+  KOKKOS_INLINE_FUNCTION
+  int use_count() const { return m_track.use_count(); }
+
+  inline const std::string label() const {
+    return m_track.template get_label<typename traits::memory_space>();
+  }
+
+  // Choosing std::pair as type for the arguments allows constructing an
+  // OffsetView using list initialization syntax, e.g.,
+  //   OffsetView dummy("dummy", {-1, 3}, {-2,2});
+  // We could allow arbitrary types RangeType that support
+  // std::get<{0,1}>(RangeType const&) with std::tuple_size<RangeType>::value==2
+  // but this wouldn't allow using the syntax in the example above.
+  template <typename Label>
+  explicit inline OffsetView(
+      const Label& arg_label,
+      std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value,
+                       const std::pair<int64_t, int64_t>>
+          range0,
+      const std::pair<int64_t, int64_t> range1 = KOKKOS_INVALID_INDEX_RANGE,
+      const std::pair<int64_t, int64_t> range2 = KOKKOS_INVALID_INDEX_RANGE,
+      const std::pair<int64_t, int64_t> range3 = KOKKOS_INVALID_INDEX_RANGE,
+      const std::pair<int64_t, int64_t> range4 = KOKKOS_INVALID_INDEX_RANGE,
+      const std::pair<int64_t, int64_t> range5 = KOKKOS_INVALID_INDEX_RANGE,
+      const std::pair<int64_t, int64_t> range6 = KOKKOS_INVALID_INDEX_RANGE,
+      const std::pair<int64_t, int64_t> range7 = KOKKOS_INVALID_INDEX_RANGE)
+      : OffsetView(
+            Kokkos::Impl::ViewCtorProp<std::string>(arg_label),
+            typename traits::array_layout(range0.second - range0.first + 1,
+                                          range1.second - range1.first + 1,
+                                          range2.second - range2.first + 1,
+                                          range3.second - range3.first + 1,
+                                          range4.second - range4.first + 1,
+                                          range5.second - range5.first + 1,
+                                          range6.second - range6.first + 1,
+                                          range7.second - range7.first + 1),
+            {range0.first, range1.first, range2.first, range3.first,
+             range4.first, range5.first, range6.first, range7.first}) {}
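+
+  // A minimal usage sketch of the range constructor above; both ends of
+  // each pair are inclusive, so the extent is second - first + 1:
+  //
+  //   Kokkos::Experimental::OffsetView<double**> v("v", {-1, 3}, {-2, 2});
+  //   // v.extent(0) == 5 and v.extent(1) == 5; valid accesses are
+  //   // v(i, j) with i in [-1, 3] and j in [-2, 2]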
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  template <typename Label>
+  KOKKOS_DEPRECATED_WITH_COMMENT(
+      "Use the constructor taking std::pair<int64_t, int64_t> arguments "
+      "instead!")
+  explicit inline OffsetView(
+      const Label& arg_label,
+      std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value,
+                       const index_list_type>
+          range0,
+      const index_list_type range1 = KOKKOS_INVALID_INDEX_RANGE,
+      const index_list_type range2 = KOKKOS_INVALID_INDEX_RANGE,
+      const index_list_type range3 = KOKKOS_INVALID_INDEX_RANGE,
+      const index_list_type range4 = KOKKOS_INVALID_INDEX_RANGE,
+      const index_list_type range5 = KOKKOS_INVALID_INDEX_RANGE,
+      const index_list_type range6 = KOKKOS_INVALID_INDEX_RANGE,
+      const index_list_type range7 = KOKKOS_INVALID_INDEX_RANGE)
+      : OffsetView(
+            arg_label,
+            std::pair<int64_t, int64_t>(range0.begin()[0], range0.begin()[1]),
+            std::pair<int64_t, int64_t>(range1.begin()[0], range1.begin()[1]),
+            std::pair<int64_t, int64_t>(range2.begin()[0], range2.begin()[1]),
+            std::pair<int64_t, int64_t>(range3.begin()[0], range3.begin()[1]),
+            std::pair<int64_t, int64_t>(range4.begin()[0], range4.begin()[1]),
+            std::pair<int64_t, int64_t>(range5.begin()[0], range5.begin()[1]),
+            std::pair<int64_t, int64_t>(range6.begin()[0], range6.begin()[1]),
+            std::pair<int64_t, int64_t>(range7.begin()[0], range7.begin()[1])) {
+  }
+#endif
+
+  template <class... P>
+  explicit KOKKOS_INLINE_FUNCTION OffsetView(
+      const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+      std::enable_if_t<Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
+                       typename traits::array_layout> const& arg_layout,
+      const index_list_type minIndices)
+      : m_track(),  // No memory tracking
+        m_map(arg_prop, arg_layout) {
+    for (size_t i = 0; i < minIndices.size(); ++i) {
+      m_begins[i] = minIndices.begin()[i];
+    }
+    static_assert(
+        std::is_same<pointer_type, typename Kokkos::Impl::ViewCtorProp<
+                                       P...>::pointer_type>::value,
+        "When constructing OffsetView to wrap user memory, you must supply "
+        "matching pointer type");
+  }
+
+  template <class... P>
+  explicit inline OffsetView(
+      const Kokkos::Impl::ViewCtorProp<P...>& arg_prop,
+      std::enable_if_t<!Kokkos::Impl::ViewCtorProp<P...>::has_pointer,
+                       typename traits::array_layout> const& arg_layout,
+      const index_list_type minIndices)
+      : m_track(), m_map() {
+    for (size_t i = 0; i < Rank; ++i) m_begins[i] = minIndices.begin()[i];
+
+    // Append layout and spaces if not input
+    using alloc_prop_input = Kokkos::Impl::ViewCtorProp<P...>;
+
+    // Use 'std::integral_constant<unsigned, I>' as unique placeholder types
+    // to avoid duplicate-class errors in the ViewCtorProp pack below.
+    using alloc_prop = Kokkos::Impl::ViewCtorProp<
+        P...,
+        std::conditional_t<alloc_prop_input::has_label,
+                           std::integral_constant<unsigned, 0>, std::string>,
+        std::conditional_t<alloc_prop_input::has_memory_space,
+                           std::integral_constant<unsigned, 1>,
+                           typename traits::device_type::memory_space>,
+        std::conditional_t<alloc_prop_input::has_execution_space,
+                           std::integral_constant<unsigned, 2>,
+                           typename traits::device_type::execution_space>>;
+
+    static_assert(traits::is_managed,
+                  "OffsetView allocation constructor requires managed memory");
+
+    if (alloc_prop::initialize &&
+        !alloc_prop::execution_space::impl_is_initialized()) {
+      // If initializing view data then
+      // the execution space must be initialized.
+      Kokkos::Impl::throw_runtime_exception(
+          "Constructing OffsetView and initializing data with uninitialized "
+          "execution space");
+    }
+
+    // Copy the input allocation properties with possibly defaulted properties
+    alloc_prop prop_copy(arg_prop);
+
+    //------------------------------------------------------------
+#if defined(KOKKOS_ENABLE_CUDA)
+    // If allocating in CudaUVMSpace must fence before and after
+    // the allocation to protect against possible concurrent access
+    // on the CPU and the GPU.
+    // Fence using the trait's execution space (which will be Kokkos::Cuda)
+    // to avoid incomplete type errors from using Kokkos::Cuda directly.
+    if (std::is_same<Kokkos::CudaUVMSpace,
+                     typename traits::device_type::memory_space>::value) {
+      typename traits::device_type::memory_space::execution_space().fence(
+          "Kokkos::OffsetView::OffsetView(): fence before UVM allocation");
+    }
+#endif
+    //------------------------------------------------------------
+
+    Kokkos::Impl::SharedAllocationRecord<>* record = m_map.allocate_shared(
+        prop_copy, arg_layout,
+        Kokkos::Impl::ViewCtorProp<P...>::has_execution_space);
+
+    //------------------------------------------------------------
+#if defined(KOKKOS_ENABLE_CUDA)
+    if (std::is_same<Kokkos::CudaUVMSpace,
+                     typename traits::device_type::memory_space>::value) {
+      typename traits::device_type::memory_space::execution_space().fence(
+          "Kokkos::OffsetView::OffsetView(): fence after UVM allocation");
+    }
+#endif
+    //------------------------------------------------------------
+
+    // Setup and initialization complete, start tracking
+    m_track.assign_allocated_record_to_uninitialized(record);
+
+    KOKKOS_IF_ON_HOST((Kokkos::Experimental::Impl::runtime_check_rank_host(
+                           traits::rank_dynamic, Rank, minIndices, label());))
+
+    KOKKOS_IF_ON_DEVICE((Kokkos::Experimental::Impl::runtime_check_rank_device(
+                             traits::rank_dynamic, Rank, minIndices);))
+  }
+};
+
+/** \brief Temporary free function rank()
+ *         until rank() is implemented
+ *         in the View
+ */
+template <typename D, class... P>
+KOKKOS_INLINE_FUNCTION constexpr unsigned rank(const OffsetView<D, P...>& V) {
+  return V.Rank;
+}  // Temporary until added to View
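+
+// Usage sketch: given a rank-2 OffsetView v, rank(v) returns 2; the
+// result is usable in constant expressions.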
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+namespace Impl {
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral<T>::value, T>
+shift_input(const T arg, const int64_t offset) {
+  return arg - offset;
+}
+
+KOKKOS_INLINE_FUNCTION
+Kokkos::Impl::ALL_t shift_input(const Kokkos::Impl::ALL_t arg,
+                                const int64_t /*offset*/) {
+  return arg;
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_integral<T>::value, Kokkos::pair<T, T>>
+    shift_input(const Kokkos::pair<T, T> arg, const int64_t offset) {
+  return Kokkos::make_pair<T, T>(arg.first - offset, arg.second - offset);
+}
+template <class T>
+inline std::enable_if_t<std::is_integral<T>::value, std::pair<T, T>>
+shift_input(const std::pair<T, T> arg, const int64_t offset) {
+  return std::make_pair<T, T>(arg.first - offset, arg.second - offset);
+}
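+
+// The shift_input overloads above translate user-facing (offset) subview
+// arguments into zero-based arguments for the wrapped View: integral
+// indices and index pairs are shifted by the corresponding begin, while
+// ALL_t passes through unchanged. Sketch: with begins[0] == -2,
+// shift_input(0, -2) == 2 and shift_input(std::pair{-2, 1}, -2) == {0, 3}.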
+
+template <size_t N, class Arg, class A>
+KOKKOS_INLINE_FUNCTION void map_arg_to_new_begin(
+    const size_t i, Kokkos::Array<int64_t, N>& subviewBegins,
+    std::enable_if_t<N != 0, const Arg> shiftedArg, const Arg arg,
+    const A viewBegins, size_t& counter) {
+  if (!std::is_integral<Arg>::value) {
+    subviewBegins[counter] = shiftedArg == arg ? viewBegins[i] : 0;
+    counter++;
+  }
+}
+
+template <size_t N, class Arg, class A>
+KOKKOS_INLINE_FUNCTION void map_arg_to_new_begin(
+    const size_t /*i*/, Kokkos::Array<int64_t, N>& /*subviewBegins*/,
+    std::enable_if_t<N == 0, const Arg> /*shiftedArg*/, const Arg /*arg*/,
+    const A /*viewBegins*/, size_t& /*counter*/) {}
+
+template <class D, class... P, class T>
+KOKKOS_INLINE_FUNCTION
+    typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+        typename Kokkos::Impl::ViewMapping<void /* deduce subview type from
+                                                   source view traits */
+                                           ,
+                                           ViewTraits<D, P...>, T>::type>::type
+    subview_offset(const OffsetView<D, P...>& src, T arg) {
+  auto theView = src.view();
+  auto begins  = src.begins();
+
+  T shiftedArg = shift_input(arg, begins[0]);
+
+  constexpr size_t rank =
+      Kokkos::Impl::ViewMapping<void /* deduce subview type from source view
+                                        traits */
+                                ,
+                                ViewTraits<D, P...>, T>::type::Rank;
+
+  auto theSubview = Kokkos::subview(theView, shiftedArg);
+
+  Kokkos::Array<int64_t, rank> subviewBegins;
+  size_t counter = 0;
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(0, subviewBegins, shiftedArg,
+                                                   arg, begins, counter);
+
+  typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+      typename Kokkos::Impl::ViewMapping<void /* deduce subview type from source
+                                                 view traits */
+                                         ,
+                                         ViewTraits<D, P...>, T>::type>::type
+      offsetView(theSubview, subviewBegins);
+
+  return offsetView;
+}
+
+template <class D, class... P, class T0, class T1>
+KOKKOS_INLINE_FUNCTION
+    typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+        typename Kokkos::Impl::ViewMapping<
+            void /* deduce subview type from source view traits */
+            ,
+            ViewTraits<D, P...>, T0, T1>::type>::type
+    subview_offset(const Kokkos::Experimental::OffsetView<D, P...>& src,
+                   T0 arg0, T1 arg1) {
+  auto theView = src.view();
+  auto begins  = src.begins();
+
+  T0 shiftedArg0 = shift_input(arg0, begins[0]);
+  T1 shiftedArg1 = shift_input(arg1, begins[1]);
+
+  auto theSubview = Kokkos::subview(theView, shiftedArg0, shiftedArg1);
+  constexpr size_t rank =
+      Kokkos::Impl::ViewMapping<void /* deduce subview type from source view
+                                        traits */
+                                ,
+                                ViewTraits<D, P...>, T0, T1>::type::Rank;
+
+  Kokkos::Array<int64_t, rank> subviewBegins;
+  size_t counter = 0;
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      0, subviewBegins, shiftedArg0, arg0, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      1, subviewBegins, shiftedArg1, arg1, begins, counter);
+
+  typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+      typename Kokkos::Impl::ViewMapping<
+          void /* deduce subview type from source view traits */
+          ,
+          ViewTraits<D, P...>, T0, T1>::type>::type offsetView(theSubview,
+                                                               subviewBegins);
+
+  return offsetView;
+}
+
+template <class D, class... P, class T0, class T1, class T2>
+KOKKOS_INLINE_FUNCTION
+    typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+        typename Kokkos::Impl::ViewMapping<
+            void /* deduce subview type from source view traits */
+            ,
+            ViewTraits<D, P...>, T0, T1, T2>::type>::type
+    subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2) {
+  auto theView = src.view();
+  auto begins  = src.begins();
+
+  T0 shiftedArg0 = shift_input(arg0, begins[0]);
+  T1 shiftedArg1 = shift_input(arg1, begins[1]);
+  T2 shiftedArg2 = shift_input(arg2, begins[2]);
+
+  auto theSubview =
+      Kokkos::subview(theView, shiftedArg0, shiftedArg1, shiftedArg2);
+
+  constexpr size_t rank =
+      Kokkos::Impl::ViewMapping<void /* deduce subview type from source view
+                                        traits */
+                                ,
+                                ViewTraits<D, P...>, T0, T1, T2>::type::Rank;
+
+  Kokkos::Array<int64_t, rank> subviewBegins;
+
+  size_t counter = 0;
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      0, subviewBegins, shiftedArg0, arg0, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      1, subviewBegins, shiftedArg1, arg1, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      2, subviewBegins, shiftedArg2, arg2, begins, counter);
+
+  typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+      typename Kokkos::Impl::ViewMapping<
+          void /* deduce subview type from source view traits */
+          ,
+          ViewTraits<D, P...>, T0, T1, T2>::type>::type
+      offsetView(theSubview, subviewBegins);
+
+  return offsetView;
+}
+
+template <class D, class... P, class T0, class T1, class T2, class T3>
+KOKKOS_INLINE_FUNCTION
+    typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+        typename Kokkos::Impl::ViewMapping<
+            void /* deduce subview type from source view traits */
+            ,
+            ViewTraits<D, P...>, T0, T1, T2, T3>::type>::type
+    subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
+                   T3 arg3) {
+  auto theView = src.view();
+  auto begins  = src.begins();
+
+  T0 shiftedArg0 = shift_input(arg0, begins[0]);
+  T1 shiftedArg1 = shift_input(arg1, begins[1]);
+  T2 shiftedArg2 = shift_input(arg2, begins[2]);
+  T3 shiftedArg3 = shift_input(arg3, begins[3]);
+
+  auto theSubview = Kokkos::subview(theView, shiftedArg0, shiftedArg1,
+                                    shiftedArg2, shiftedArg3);
+
+  constexpr size_t rank = Kokkos::Impl::ViewMapping<
+      void /* deduce subview type from source view traits */
+      ,
+      ViewTraits<D, P...>, T0, T1, T2, T3>::type::Rank;
+  Kokkos::Array<int64_t, rank> subviewBegins;
+
+  size_t counter = 0;
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      0, subviewBegins, shiftedArg0, arg0, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      1, subviewBegins, shiftedArg1, arg1, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      2, subviewBegins, shiftedArg2, arg2, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      3, subviewBegins, shiftedArg3, arg3, begins, counter);
+
+  typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+      typename Kokkos::Impl::ViewMapping<
+          void /* deduce subview type from source view traits */
+          ,
+          ViewTraits<D, P...>, T0, T1, T2, T3>::type>::type
+      offsetView(theSubview, subviewBegins);
+
+  return offsetView;
+}
+
+template <class D, class... P, class T0, class T1, class T2, class T3, class T4>
+KOKKOS_INLINE_FUNCTION
+    typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+        typename Kokkos::Impl::ViewMapping<
+            void /* deduce subview type from source view traits */
+            ,
+            ViewTraits<D, P...>, T0, T1, T2, T3, T4>::type>::type
+    subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
+                   T3 arg3, T4 arg4) {
+  auto theView = src.view();
+  auto begins  = src.begins();
+
+  T0 shiftedArg0 = shift_input(arg0, begins[0]);
+  T1 shiftedArg1 = shift_input(arg1, begins[1]);
+  T2 shiftedArg2 = shift_input(arg2, begins[2]);
+  T3 shiftedArg3 = shift_input(arg3, begins[3]);
+  T4 shiftedArg4 = shift_input(arg4, begins[4]);
+
+  auto theSubview = Kokkos::subview(theView, shiftedArg0, shiftedArg1,
+                                    shiftedArg2, shiftedArg3, shiftedArg4);
+
+  constexpr size_t rank = Kokkos::Impl::ViewMapping<
+      void /* deduce subview type from source view traits */
+      ,
+      ViewTraits<D, P...>, T0, T1, T2, T3, T4>::type::Rank;
+  Kokkos::Array<int64_t, rank> subviewBegins;
+
+  size_t counter = 0;
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      0, subviewBegins, shiftedArg0, arg0, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      1, subviewBegins, shiftedArg1, arg1, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      2, subviewBegins, shiftedArg2, arg2, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      3, subviewBegins, shiftedArg3, arg3, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      4, subviewBegins, shiftedArg4, arg4, begins, counter);
+
+  typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+      typename Kokkos::Impl::ViewMapping<
+          void /* deduce subview type from source view traits */
+          ,
+          ViewTraits<D, P...>, T0, T1, T2, T3, T4>::type>::type
+      offsetView(theSubview, subviewBegins);
+
+  return offsetView;
+}
+
+template <class D, class... P, class T0, class T1, class T2, class T3, class T4,
+          class T5>
+KOKKOS_INLINE_FUNCTION
+    typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+        typename Kokkos::Impl::ViewMapping<
+            void /* deduce subview type from source view traits */
+            ,
+            ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5>::type>::type
+    subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
+                   T3 arg3, T4 arg4, T5 arg5) {
+  auto theView = src.view();
+  auto begins  = src.begins();
+
+  T0 shiftedArg0 = shift_input(arg0, begins[0]);
+  T1 shiftedArg1 = shift_input(arg1, begins[1]);
+  T2 shiftedArg2 = shift_input(arg2, begins[2]);
+  T3 shiftedArg3 = shift_input(arg3, begins[3]);
+  T4 shiftedArg4 = shift_input(arg4, begins[4]);
+  T5 shiftedArg5 = shift_input(arg5, begins[5]);
+
+  auto theSubview =
+      Kokkos::subview(theView, shiftedArg0, shiftedArg1, shiftedArg2,
+                      shiftedArg3, shiftedArg4, shiftedArg5);
+
+  constexpr size_t rank = Kokkos::Impl::ViewMapping<
+      void /* deduce subview type from source view traits */
+      ,
+      ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5>::type::Rank;
+
+  Kokkos::Array<int64_t, rank> subviewBegins;
+
+  size_t counter = 0;
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      0, subviewBegins, shiftedArg0, arg0, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      1, subviewBegins, shiftedArg1, arg1, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      2, subviewBegins, shiftedArg2, arg2, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      3, subviewBegins, shiftedArg3, arg3, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      4, subviewBegins, shiftedArg4, arg4, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      5, subviewBegins, shiftedArg5, arg5, begins, counter);
+
+  typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+      typename Kokkos::Impl::ViewMapping<
+          void /* deduce subview type from source view traits */
+          ,
+          ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5>::type>::type
+      offsetView(theSubview, subviewBegins);
+
+  return offsetView;
+}
+template <class D, class... P, class T0, class T1, class T2, class T3, class T4,
+          class T5, class T6>
+KOKKOS_INLINE_FUNCTION
+    typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+        typename Kokkos::Impl::ViewMapping<
+            void /* deduce subview type from source view traits */
+            ,
+            ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6>::type>::type
+    subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
+                   T3 arg3, T4 arg4, T5 arg5, T6 arg6) {
+  auto theView = src.view();
+  auto begins  = src.begins();
+
+  T0 shiftedArg0 = shift_input(arg0, begins[0]);
+  T1 shiftedArg1 = shift_input(arg1, begins[1]);
+  T2 shiftedArg2 = shift_input(arg2, begins[2]);
+  T3 shiftedArg3 = shift_input(arg3, begins[3]);
+  T4 shiftedArg4 = shift_input(arg4, begins[4]);
+  T5 shiftedArg5 = shift_input(arg5, begins[5]);
+  T6 shiftedArg6 = shift_input(arg6, begins[6]);
+
+  auto theSubview =
+      Kokkos::subview(theView, shiftedArg0, shiftedArg1, shiftedArg2,
+                      shiftedArg3, shiftedArg4, shiftedArg5, shiftedArg6);
+
+  constexpr size_t rank = Kokkos::Impl::ViewMapping<
+      void /* deduce subview type from source view traits */
+      ,
+      ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6>::type::Rank;
+
+  Kokkos::Array<int64_t, rank> subviewBegins;
+
+  size_t counter = 0;
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      0, subviewBegins, shiftedArg0, arg0, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      1, subviewBegins, shiftedArg1, arg1, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      2, subviewBegins, shiftedArg2, arg2, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      3, subviewBegins, shiftedArg3, arg3, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      4, subviewBegins, shiftedArg4, arg4, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      5, subviewBegins, shiftedArg5, arg5, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      6, subviewBegins, shiftedArg6, arg6, begins, counter);
+
+  typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+      typename Kokkos::Impl::ViewMapping<
+          void /* deduce subview type from source view traits */
+          ,
+          ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6>::type>::type
+      offsetView(theSubview, subviewBegins);
+
+  return offsetView;
+}
+
+template <class D, class... P, class T0, class T1, class T2, class T3, class T4,
+          class T5, class T6, class T7>
+KOKKOS_INLINE_FUNCTION
+    typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+        typename Kokkos::Impl::ViewMapping<
+            void /* deduce subview type from source view traits */
+            ,
+            ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6, T7>::type>::type
+    subview_offset(const OffsetView<D, P...>& src, T0 arg0, T1 arg1, T2 arg2,
+                   T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7) {
+  auto theView = src.view();
+  auto begins  = src.begins();
+
+  T0 shiftedArg0 = shift_input(arg0, begins[0]);
+  T1 shiftedArg1 = shift_input(arg1, begins[1]);
+  T2 shiftedArg2 = shift_input(arg2, begins[2]);
+  T3 shiftedArg3 = shift_input(arg3, begins[3]);
+  T4 shiftedArg4 = shift_input(arg4, begins[4]);
+  T5 shiftedArg5 = shift_input(arg5, begins[5]);
+  T6 shiftedArg6 = shift_input(arg6, begins[6]);
+  T7 shiftedArg7 = shift_input(arg7, begins[7]);
+
+  auto theSubview = Kokkos::subview(theView, shiftedArg0, shiftedArg1,
+                                    shiftedArg2, shiftedArg3, shiftedArg4,
+                                    shiftedArg5, shiftedArg6, shiftedArg7);
+
+  constexpr size_t rank = Kokkos::Impl::ViewMapping<
+      void /* deduce subview type from source view traits */
+      ,
+      ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6, T7>::type::Rank;
+
+  Kokkos::Array<int64_t, rank> subviewBegins;
+
+  size_t counter = 0;
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      0, subviewBegins, shiftedArg0, arg0, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      1, subviewBegins, shiftedArg1, arg1, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      2, subviewBegins, shiftedArg2, arg2, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      3, subviewBegins, shiftedArg3, arg3, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      4, subviewBegins, shiftedArg4, arg4, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      5, subviewBegins, shiftedArg5, arg5, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      6, subviewBegins, shiftedArg6, arg6, begins, counter);
+  Kokkos::Experimental::Impl::map_arg_to_new_begin(
+      7, subviewBegins, shiftedArg7, arg7, begins, counter);
+
+  typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+      typename Kokkos::Impl::ViewMapping<
+          void /* deduce subview type from source view traits */
+          ,
+          ViewTraits<D, P...>, T0, T1, T2, T3, T4, T5, T6, T7>::type>::type
+      offsetView(theSubview, subviewBegins);
+
+  return offsetView;
+}
+}  // namespace Impl
+
+template <class D, class... P, class... Args>
+KOKKOS_INLINE_FUNCTION
+    typename Kokkos::Experimental::Impl::GetOffsetViewTypeFromViewType<
+        typename Kokkos::Impl::ViewMapping<
+            void /* deduce subview type from source view traits */
+            ,
+            ViewTraits<D, P...>, Args...>::type>::type
+    subview(const OffsetView<D, P...>& src, Args... args) {
+  static_assert(
+      OffsetView<D, P...>::Rank == sizeof...(Args),
+      "subview requires one argument for each source OffsetView rank");
+
+  return Kokkos::Experimental::Impl::subview_offset(src, args...);
+}
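+
+// A usage sketch (assuming a rank-2 OffsetView v with begins {-1, -2}):
+//
+//   auto row = Kokkos::Experimental::subview(v, 0, Kokkos::ALL());
+//   // row is a rank-1 OffsetView; a dimension taken with ALL keeps its
+//   // begin, so row.begin(0) == -2, while a dimension restricted with a
+//   // pair is rebased to start at 0.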
+
+}  // namespace Experimental
+}  // namespace Kokkos
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Experimental {
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator==(const OffsetView<LT, LP...>& lhs,
+                                       const OffsetView<RT, RP...>& rhs) {
+  // Same data, layout, dimensions
+  using lhs_traits = ViewTraits<LT, LP...>;
+  using rhs_traits = ViewTraits<RT, RP...>;
+
+  return std::is_same<typename lhs_traits::const_value_type,
+                      typename rhs_traits::const_value_type>::value &&
+         std::is_same<typename lhs_traits::array_layout,
+                      typename rhs_traits::array_layout>::value &&
+         std::is_same<typename lhs_traits::memory_space,
+                      typename rhs_traits::memory_space>::value &&
+         unsigned(lhs_traits::rank) == unsigned(rhs_traits::rank) &&
+         lhs.data() == rhs.data() && lhs.span() == rhs.span() &&
+         lhs.extent(0) == rhs.extent(0) && lhs.extent(1) == rhs.extent(1) &&
+         lhs.extent(2) == rhs.extent(2) && lhs.extent(3) == rhs.extent(3) &&
+         lhs.extent(4) == rhs.extent(4) && lhs.extent(5) == rhs.extent(5) &&
+         lhs.extent(6) == rhs.extent(6) && lhs.extent(7) == rhs.extent(7) &&
+         lhs.begin(0) == rhs.begin(0) && lhs.begin(1) == rhs.begin(1) &&
+         lhs.begin(2) == rhs.begin(2) && lhs.begin(3) == rhs.begin(3) &&
+         lhs.begin(4) == rhs.begin(4) && lhs.begin(5) == rhs.begin(5) &&
+         lhs.begin(6) == rhs.begin(6) && lhs.begin(7) == rhs.begin(7);
+}
+
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator!=(const OffsetView<LT, LP...>& lhs,
+                                       const OffsetView<RT, RP...>& rhs) {
+  return !(operator==(lhs, rhs));
+}
+
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator==(const View<LT, LP...>& lhs,
+                                       const OffsetView<RT, RP...>& rhs) {
+  // Same data, layout, dimensions
+  using lhs_traits = ViewTraits<LT, LP...>;
+  using rhs_traits = ViewTraits<RT, RP...>;
+
+  return std::is_same<typename lhs_traits::const_value_type,
+                      typename rhs_traits::const_value_type>::value &&
+         std::is_same<typename lhs_traits::array_layout,
+                      typename rhs_traits::array_layout>::value &&
+         std::is_same<typename lhs_traits::memory_space,
+                      typename rhs_traits::memory_space>::value &&
+         unsigned(lhs_traits::rank) == unsigned(rhs_traits::rank) &&
+         lhs.data() == rhs.data() && lhs.span() == rhs.span() &&
+         lhs.extent(0) == rhs.extent(0) && lhs.extent(1) == rhs.extent(1) &&
+         lhs.extent(2) == rhs.extent(2) && lhs.extent(3) == rhs.extent(3) &&
+         lhs.extent(4) == rhs.extent(4) && lhs.extent(5) == rhs.extent(5) &&
+         lhs.extent(6) == rhs.extent(6) && lhs.extent(7) == rhs.extent(7);
+}
+
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator==(const OffsetView<LT, LP...>& lhs,
+                                       const View<RT, RP...>& rhs) {
+  return rhs == lhs;
+}
+
+}  // namespace Experimental
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+template <class DT, class... DP>
+inline void deep_copy(
+    const Experimental::OffsetView<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
+                                  void>::value>* = nullptr) {
+  static_assert(
+      std::is_same<typename ViewTraits<DT, DP...>::non_const_value_type,
+                   typename ViewTraits<DT, DP...>::value_type>::value,
+      "deep_copy requires non-const type");
+
+  auto dstView = dst.view();
+  Kokkos::deep_copy(dstView, value);
+}
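+
+// A usage sketch for the deep_copy overloads here (hypothetical names):
+//
+//   Kokkos::Experimental::OffsetView<double*> v("v", {-3, 3});
+//   Kokkos::deep_copy(v, 0.0);  // fills all of v via the wrapped View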
+
+template <class DT, class... DP, class ST, class... SP>
+inline void deep_copy(
+    const Experimental::OffsetView<DT, DP...>& dst,
+    const Experimental::OffsetView<ST, SP...>& value,
+    std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
+                                  void>::value>* = nullptr) {
+  static_assert(
+      std::is_same<typename ViewTraits<DT, DP...>::value_type,
+                   typename ViewTraits<ST, SP...>::non_const_value_type>::value,
+      "deep_copy requires matching non-const destination type");
+
+  auto dstView = dst.view();
+  Kokkos::deep_copy(dstView, value.view());
+}
+template <class DT, class... DP, class ST, class... SP>
+inline void deep_copy(
+    const Experimental::OffsetView<DT, DP...>& dst,
+    const View<ST, SP...>& value,
+    std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
+                                  void>::value>* = nullptr) {
+  static_assert(
+      std::is_same<typename ViewTraits<DT, DP...>::value_type,
+                   typename ViewTraits<ST, SP...>::non_const_value_type>::value,
+      "deep_copy requires matching non-const destination type");
+
+  auto dstView = dst.view();
+  Kokkos::deep_copy(dstView, value);
+}
+
+template <class DT, class... DP, class ST, class... SP>
+inline void deep_copy(
+    const View<DT, DP...>& dst,
+    const Experimental::OffsetView<ST, SP...>& value,
+    std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
+                                  void>::value>* = nullptr) {
+  static_assert(
+      std::is_same<typename ViewTraits<DT, DP...>::value_type,
+                   typename ViewTraits<ST, SP...>::non_const_value_type>::value,
+      "deep_copy requires matching non-const destination type");
+
+  Kokkos::deep_copy(dst, value.view());
+}
+
+namespace Impl {
+
+// Deduce Mirror Types
+template <class Space, class T, class... P>
+struct MirrorOffsetViewType {
+  // The incoming view_type
+  using src_view_type = typename Kokkos::Experimental::OffsetView<T, P...>;
+  // The memory space for the mirror view
+  using memory_space = typename Space::memory_space;
+  // Check whether it is the same memory space
+  enum {
+    is_same_memspace =
+        std::is_same<memory_space, typename src_view_type::memory_space>::value
+  };
+  // The array_layout
+  using array_layout = typename src_view_type::array_layout;
+  // The data type (non-const, since otherwise we could not even deep_copy
+  // into the mirror)
+  using data_type = typename src_view_type::non_const_data_type;
+  // The destination view type if it is not the same memory space
+  using dest_view_type =
+      Kokkos::Experimental::OffsetView<data_type, array_layout, Space>;
+  // If it is the same memory_space return the existing view_type
+  // This will also keep the unmanaged trait if necessary
+  using view_type =
+      std::conditional_t<is_same_memspace, src_view_type, dest_view_type>;
+};
+
+template <class Space, class T, class... P>
+struct MirrorOffsetType {
+  // The incoming view_type
+  using src_view_type = typename Kokkos::Experimental::OffsetView<T, P...>;
+  // The memory space for the mirror view
+  using memory_space = typename Space::memory_space;
+  // Check whether it is the same memory space
+  enum {
+    is_same_memspace =
+        std::is_same<memory_space, typename src_view_type::memory_space>::value
+  };
+  // The array_layout
+  using array_layout = typename src_view_type::array_layout;
+  // The data type (non-const, since otherwise we could not even deep_copy
+  // into the mirror)
+  using data_type = typename src_view_type::non_const_data_type;
+  // The destination view type if it is not the same memory space
+  using view_type =
+      Kokkos::Experimental::OffsetView<data_type, array_layout, Space>;
+};
+
+}  // namespace Impl
+
+namespace Impl {
+template <class T, class... P, class... ViewCtorArgs>
+inline typename Kokkos::Experimental::OffsetView<T, P...>::HostMirror
+create_mirror(const Kokkos::Experimental::OffsetView<T, P...>& src,
+              const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  return typename Kokkos::Experimental::OffsetView<T, P...>::HostMirror(
+      Kokkos::create_mirror(arg_prop, src.view()), src.begins());
+}
+
+template <class Space, class T, class... P, class... ViewCtorArgs>
+inline typename Kokkos::Impl::MirrorOffsetType<Space, T, P...>::view_type
+create_mirror(const Space&,
+              const Kokkos::Experimental::OffsetView<T, P...>& src,
+              const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(
+      !alloc_prop_input::has_label,
+      "The view constructor arguments passed to Kokkos::create_mirror "
+      "must not include a label!");
+  static_assert(
+      !alloc_prop_input::has_pointer,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not include a pointer!");
+  static_assert(
+      !alloc_prop_input::has_memory_space,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not include a memory space instance!");
+  static_assert(
+      !alloc_prop_input::allow_padding,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not explicitly allow padding!");
+
+  using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
+  alloc_prop prop_copy(arg_prop);
+  static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
+      std::string(src.label()).append("_mirror");
+
+  return typename Kokkos::Impl::MirrorOffsetType<Space, T, P...>::view_type(
+      prop_copy, src.layout(),
+      {src.begin(0), src.begin(1), src.begin(2), src.begin(3), src.begin(4),
+       src.begin(5), src.begin(6), src.begin(7)});
+}
+}  // namespace Impl
+
+// Create a mirror in host space
+template <class T, class... P>
+inline auto create_mirror(
+    const Kokkos::Experimental::OffsetView<T, P...>& src) {
+  return Impl::create_mirror(src, Impl::ViewCtorProp<>{});
+}
+
+template <class T, class... P>
+inline auto create_mirror(
+    Kokkos::Impl::WithoutInitializing_t wi,
+    const Kokkos::Experimental::OffsetView<T, P...>& src) {
+  return Impl::create_mirror(src, Kokkos::view_alloc(wi));
+}
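+
+// Sketch of the host-mirror overloads above, given some OffsetView v
+// (hypothetical); mirrors preserve the begins of the source:
+//
+//   auto h  = Kokkos::create_mirror(v);  // initialized host mirror
+//   auto h2 = Kokkos::create_mirror(Kokkos::WithoutInitializing, v);
+//   // h.begin(0) == v.begin(0)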
+
+// Create a mirror in a new space
+template <class Space, class T, class... P,
+          typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+inline auto create_mirror(
+    const Space& space, const Kokkos::Experimental::OffsetView<T, P...>& src) {
+  return Impl::create_mirror(space, src, Impl::ViewCtorProp<>{});
+}
+
+template <class Space, class T, class... P>
+typename Kokkos::Impl::MirrorOffsetType<Space, T, P...>::view_type
+create_mirror(Kokkos::Impl::WithoutInitializing_t wi, const Space& space,
+              const Kokkos::Experimental::OffsetView<T, P...>& src) {
+  return Impl::create_mirror(space, src, Kokkos::view_alloc(wi));
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    const Kokkos::Experimental::OffsetView<T, P...>& src) {
+  return Impl::create_mirror(src, arg_prop);
+}
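+
+// An illustrative usage sketch (not part of this header; the label, extents,
+// and offset range below are assumptions for the example only):
+//
+//   Kokkos::Experimental::OffsetView<double*> v("v", {-2, 7});
+//   auto h = Kokkos::create_mirror(v);  // HostMirror, same begins() as v
+//   auto d = Kokkos::create_mirror(Kokkos::DefaultExecutionSpace(), v);
+//   // Both mirrors are newly allocated; create_mirror never aliases v.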
+
+namespace Impl {
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    (std::is_same<
+         typename Kokkos::Experimental::OffsetView<T, P...>::memory_space,
+         typename Kokkos::Experimental::OffsetView<
+             T, P...>::HostMirror::memory_space>::value &&
+     std::is_same<typename Kokkos::Experimental::OffsetView<T, P...>::data_type,
+                  typename Kokkos::Experimental::OffsetView<
+                      T, P...>::HostMirror::data_type>::value),
+    typename Kokkos::Experimental::OffsetView<T, P...>::HostMirror>
+create_mirror_view(
+    const typename Kokkos::Experimental::OffsetView<T, P...>& src,
+    const Impl::ViewCtorProp<ViewCtorArgs...>&) {
+  return src;
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    !(std::is_same<
+          typename Kokkos::Experimental::OffsetView<T, P...>::memory_space,
+          typename Kokkos::Experimental::OffsetView<
+              T, P...>::HostMirror::memory_space>::value &&
+      std::is_same<
+          typename Kokkos::Experimental::OffsetView<T, P...>::data_type,
+          typename Kokkos::Experimental::OffsetView<
+              T, P...>::HostMirror::data_type>::value),
+    typename Kokkos::Experimental::OffsetView<T, P...>::HostMirror>
+create_mirror_view(const Kokkos::Experimental::OffsetView<T, P...>& src,
+                   const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  return Kokkos::create_mirror(arg_prop, src);
+}
+
+template <class Space, class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    Impl::MirrorOffsetViewType<Space, T, P...>::is_same_memspace,
+    Kokkos::Experimental::OffsetView<T, P...>>
+create_mirror_view(const Space&,
+                   const Kokkos::Experimental::OffsetView<T, P...>& src,
+                   const Impl::ViewCtorProp<ViewCtorArgs...>&) {
+  return src;
+}
+
+template <class Space, class T, class... P, class... ViewCtorArgs>
+std::enable_if_t<
+    !Impl::MirrorOffsetViewType<Space, T, P...>::is_same_memspace,
+    typename Kokkos::Impl::MirrorOffsetViewType<Space, T, P...>::view_type>
+create_mirror_view(const Space& space,
+                   const Kokkos::Experimental::OffsetView<T, P...>& src,
+                   const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  return create_mirror(space, src, arg_prop);
+}
+}  // namespace Impl
+
+// Create a mirror view in host space
+template <class T, class... P>
+inline auto create_mirror_view(
+    const typename Kokkos::Experimental::OffsetView<T, P...>& src) {
+  return Impl::create_mirror_view(src, Impl::ViewCtorProp<>{});
+}
+
+template <class T, class... P>
+inline auto create_mirror_view(
+    Kokkos::Impl::WithoutInitializing_t wi,
+    const typename Kokkos::Experimental::OffsetView<T, P...>& src) {
+  return Impl::create_mirror_view(src, Kokkos::view_alloc(wi));
+}
+
+// Create a mirror view in a new space
+template <class Space, class T, class... P,
+          typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+inline auto create_mirror_view(
+    const Space& space, const Kokkos::Experimental::OffsetView<T, P...>& src) {
+  return Impl::create_mirror_view(space, src, Impl::ViewCtorProp<>{});
+}
+
+template <class Space, class T, class... P>
+inline auto create_mirror_view(
+    Kokkos::Impl::WithoutInitializing_t wi, const Space& space,
+    const Kokkos::Experimental::OffsetView<T, P...>& src) {
+  return Impl::create_mirror_view(space, src, Kokkos::view_alloc(wi));
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline auto create_mirror_view(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    const Kokkos::Experimental::OffsetView<T, P...>& src) {
+  return Impl::create_mirror_view(src, arg_prop);
+}
+
+// Create a mirror view and deep_copy in a new space
+template <class... ViewCtorArgs, class T, class... P>
+typename Kokkos::Impl::MirrorOffsetViewType<
+    typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+    P...>::view_type
+create_mirror_view_and_copy(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    const Kokkos::Experimental::OffsetView<T, P...>& src) {
+  return {create_mirror_view_and_copy(arg_prop, src.view()), src.begins()};
+}
+
+template <class Space, class T, class... P>
+typename Kokkos::Impl::MirrorOffsetViewType<Space, T, P...>::view_type
+create_mirror_view_and_copy(
+    const Space& space, const Kokkos::Experimental::OffsetView<T, P...>& src,
+    std::string const& name = "") {
+  return {create_mirror_view_and_copy(space, src.view(), name), src.begins()};
+}
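+
+// Illustrative semantics (sketch): create_mirror_view_and_copy(space, v)
+// returns an OffsetView sharing v's allocation if v's memory space matches
+// the requested space; otherwise it allocates a mirror and deep-copies into
+// it. Either way the result preserves v.begins().
+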
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_OFFSETVIEW
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_OFFSETVIEW
+#endif
+#endif /* KOKKOS_OFFSETVIEW_HPP_ */
diff --git a/bundled/kokkos-3.7.00/containers/src/Kokkos_ScatterView.hpp b/bundled/kokkos-3.7.00/containers/src/Kokkos_ScatterView.hpp
new file mode 100644 (file)
index 0000000..a9529d1
--- /dev/null
@@ -0,0 +1,1647 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/// \file Kokkos_ScatterView.hpp
+/// \brief Declaration and definition of Kokkos::ScatterView.
+///
+/// This header file declares and defines Kokkos::ScatterView and its
+/// related nonmember functions.
+
+#ifndef KOKKOS_SCATTER_VIEW_HPP
+#define KOKKOS_SCATTER_VIEW_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_SCATTERVIEW
+#endif
+
+#include <Kokkos_Core.hpp>
+#include <utility>
+
+namespace Kokkos {
+namespace Experimental {
+
+/*
+ * Reduction Type list
+ *  - These correspond to a subset of the reducers in parallel_reduce
+ *  - See the implementations of ScatterValue for details.
+ */
+struct ScatterSum {};
+struct ScatterProd {};
+struct ScatterMax {};
+struct ScatterMin {};
+
+struct ScatterNonDuplicated {};
+struct ScatterDuplicated {};
+
+struct ScatterNonAtomic {};
+struct ScatterAtomic {};
+
+}  // namespace Experimental
+}  // namespace Kokkos
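+
+// A minimal usage sketch of how these tags fit together (illustrative only;
+// nbins, nsamples, and bin_of() are assumptions, and create_scatter_view /
+// contribute refer to the free functions provided with ScatterView):
+//
+//   Kokkos::View<double*> hist("hist", nbins);
+//   auto scatter = Kokkos::Experimental::create_scatter_view(hist);
+//   Kokkos::parallel_for(nsamples, KOKKOS_LAMBDA(int i) {
+//     auto access = scatter.access();
+//     access(bin_of(i)) += 1.0;  // ScatterSum is the default Op
+//   });
+//   Kokkos::Experimental::contribute(hist, scatter);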
+
+namespace Kokkos {
+namespace Impl {
+namespace Experimental {
+
+template <typename ExecSpace>
+struct DefaultDuplication;
+
+template <typename ExecSpace, typename Duplication>
+struct DefaultContribution;
+
+#ifdef KOKKOS_ENABLE_SERIAL
+template <>
+struct DefaultDuplication<Kokkos::Serial> {
+  using type = Kokkos::Experimental::ScatterNonDuplicated;
+};
+
+template <>
+struct DefaultContribution<Kokkos::Serial,
+                           Kokkos::Experimental::ScatterNonDuplicated> {
+  using type = Kokkos::Experimental::ScatterNonAtomic;
+};
+template <>
+struct DefaultContribution<Kokkos::Serial,
+                           Kokkos::Experimental::ScatterDuplicated> {
+  using type = Kokkos::Experimental::ScatterNonAtomic;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_OPENMP
+template <>
+struct DefaultDuplication<Kokkos::OpenMP> {
+  using type = Kokkos::Experimental::ScatterDuplicated;
+};
+template <>
+struct DefaultContribution<Kokkos::OpenMP,
+                           Kokkos::Experimental::ScatterNonDuplicated> {
+  using type = Kokkos::Experimental::ScatterAtomic;
+};
+template <>
+struct DefaultContribution<Kokkos::OpenMP,
+                           Kokkos::Experimental::ScatterDuplicated> {
+  using type = Kokkos::Experimental::ScatterNonAtomic;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+template <>
+struct DefaultDuplication<Kokkos::Experimental::OpenMPTarget> {
+  using type = Kokkos::Experimental::ScatterNonDuplicated;
+};
+template <>
+struct DefaultContribution<Kokkos::Experimental::OpenMPTarget,
+                           Kokkos::Experimental::ScatterNonDuplicated> {
+  using type = Kokkos::Experimental::ScatterAtomic;
+};
+template <>
+struct DefaultContribution<Kokkos::Experimental::OpenMPTarget,
+                           Kokkos::Experimental::ScatterDuplicated> {
+  using type = Kokkos::Experimental::ScatterNonAtomic;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_HPX
+template <>
+struct DefaultDuplication<Kokkos::Experimental::HPX> {
+  using type = Kokkos::Experimental::ScatterDuplicated;
+};
+template <>
+struct DefaultContribution<Kokkos::Experimental::HPX,
+                           Kokkos::Experimental::ScatterNonDuplicated> {
+  using type = Kokkos::Experimental::ScatterAtomic;
+};
+template <>
+struct DefaultContribution<Kokkos::Experimental::HPX,
+                           Kokkos::Experimental::ScatterDuplicated> {
+  using type = Kokkos::Experimental::ScatterNonAtomic;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_THREADS
+template <>
+struct DefaultDuplication<Kokkos::Threads> {
+  using type = Kokkos::Experimental::ScatterDuplicated;
+};
+template <>
+struct DefaultContribution<Kokkos::Threads,
+                           Kokkos::Experimental::ScatterNonDuplicated> {
+  using type = Kokkos::Experimental::ScatterAtomic;
+};
+template <>
+struct DefaultContribution<Kokkos::Threads,
+                           Kokkos::Experimental::ScatterDuplicated> {
+  using type = Kokkos::Experimental::ScatterNonAtomic;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_CUDA
+template <>
+struct DefaultDuplication<Kokkos::Cuda> {
+  using type = Kokkos::Experimental::ScatterNonDuplicated;
+};
+template <>
+struct DefaultContribution<Kokkos::Cuda,
+                           Kokkos::Experimental::ScatterNonDuplicated> {
+  using type = Kokkos::Experimental::ScatterAtomic;
+};
+template <>
+struct DefaultContribution<Kokkos::Cuda,
+                           Kokkos::Experimental::ScatterDuplicated> {
+  using type = Kokkos::Experimental::ScatterAtomic;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_HIP
+template <>
+struct DefaultDuplication<Kokkos::Experimental::HIP> {
+  using type = Kokkos::Experimental::ScatterNonDuplicated;
+};
+template <>
+struct DefaultContribution<Kokkos::Experimental::HIP,
+                           Kokkos::Experimental::ScatterNonDuplicated> {
+  using type = Kokkos::Experimental::ScatterAtomic;
+};
+template <>
+struct DefaultContribution<Kokkos::Experimental::HIP,
+                           Kokkos::Experimental::ScatterDuplicated> {
+  using type = Kokkos::Experimental::ScatterAtomic;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_SYCL
+template <>
+struct DefaultDuplication<Kokkos::Experimental::SYCL> {
+  using type = Kokkos::Experimental::ScatterNonDuplicated;
+};
+template <>
+struct DefaultContribution<Kokkos::Experimental::SYCL,
+                           Kokkos::Experimental::ScatterNonDuplicated> {
+  using type = Kokkos::Experimental::ScatterAtomic;
+};
+template <>
+struct DefaultContribution<Kokkos::Experimental::SYCL,
+                           Kokkos::Experimental::ScatterDuplicated> {
+  using type = Kokkos::Experimental::ScatterAtomic;
+};
+#endif
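+
+// For example, the defaults above resolve as follows (illustrative):
+//   DefaultDuplication<Kokkos::OpenMP>::type == ScatterDuplicated
+//   DefaultContribution<Kokkos::OpenMP, ScatterDuplicated>::type
+//       == ScatterNonAtomic
+//   DefaultDuplication<Kokkos::Cuda>::type == ScatterNonDuplicated
+//   DefaultContribution<Kokkos::Cuda, ScatterNonDuplicated>::type
+//       == ScatterAtomic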
+
+// FIXME All these scatter values need overhaul:
+//   - like should they be copyable at all?
+//   - what is the internal handle type
+//   - remove join
+//   - consistently use the update function in operators
+template <typename ValueType, typename Op, typename DeviceType,
+          typename Contribution>
+struct ScatterValue;
+
+/* ScatterValue <Op=ScatterSum, Contribution=ScatterNonAtomic> is
+   the object returned by the access operator() of ScatterAccess. This class
+   is modeled after the Sum<> reducer and wraps join(dest, src) with
+   convenient operator+=, etc. Note the addition of update(ValueType const&
+   rhs) and reset() so that all reducers have common functions (see
+   ReduceDuplicates and ResetDuplicates). */
+template <typename ValueType, typename DeviceType>
+struct ScatterValue<ValueType, Kokkos::Experimental::ScatterSum, DeviceType,
+                    Kokkos::Experimental::ScatterNonAtomic> {
+  ValueType& value;
+
+ public:
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ValueType& value_in)
+      : value(value_in) {}
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ScatterValue&& other)
+      : value(other.value) {}
+  KOKKOS_FORCEINLINE_FUNCTION void operator+=(ValueType const& rhs) {
+    update(rhs);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void operator++() { update(1); }
+  KOKKOS_FORCEINLINE_FUNCTION void operator++(int) { update(1); }
+  KOKKOS_FORCEINLINE_FUNCTION void operator-=(ValueType const& rhs) {
+    update(ValueType(-rhs));
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void operator--() { update(ValueType(-1)); }
+  KOKKOS_FORCEINLINE_FUNCTION void operator--(int) { update(ValueType(-1)); }
+  KOKKOS_FORCEINLINE_FUNCTION void update(ValueType const& rhs) {
+    value += rhs;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void reset() {
+    value = reduction_identity<ValueType>::sum();
+  }
+};
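+
+// An illustrative sketch of the semantics (assumes the Serial backend is
+// enabled; not part of this header):
+//
+//   double d = 5.0;
+//   ScatterValue<double, Kokkos::Experimental::ScatterSum, Kokkos::Serial,
+//                Kokkos::Experimental::ScatterNonAtomic>
+//       sv(d);
+//   sv += 2.0;   // forwards to update(2.0): d is now 7.0
+//   sv.reset();  // d = reduction_identity<double>::sum(), i.e. 0.0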
+
+/* ScatterValue <Op=ScatterSum, Contribution=ScatterAtomic> is the
+ object returned by the access operator() of ScatterAccess. This class is
+ modeled after the Sum<> reducer and, like the proxy returned by an atomic
+ View, wraps Kokkos::atomic_add with convenient operator+=, etc. This version
+ also provides the update(rhs) and reset() functions. */
+template <typename ValueType, typename DeviceType>
+struct ScatterValue<ValueType, Kokkos::Experimental::ScatterSum, DeviceType,
+                    Kokkos::Experimental::ScatterAtomic> {
+  ValueType& value;
+
+ public:
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ValueType& value_in)
+      : value(value_in) {}
+
+  KOKKOS_FORCEINLINE_FUNCTION void operator+=(ValueType const& rhs) {
+    this->join(value, rhs);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void operator++() { this->join(value, 1); }
+  KOKKOS_FORCEINLINE_FUNCTION void operator++(int) { this->join(value, 1); }
+  KOKKOS_FORCEINLINE_FUNCTION void operator-=(ValueType const& rhs) {
+    this->join(value, ValueType(-rhs));
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void operator--() {
+    this->join(value, ValueType(-1));
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void operator--(int) {
+    this->join(value, ValueType(-1));
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void join(ValueType& dest, const ValueType& src) const {
+    Kokkos::atomic_add(&dest, src);
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION void update(ValueType const& rhs) {
+    this->join(value, rhs);
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION void reset() {
+    value = reduction_identity<ValueType>::sum();
+  }
+};
+
+/* ScatterValue <Op=ScatterProd, Contribution=ScatterNonAtomic> is
+   the object returned by the access operator() of ScatterAccess. This class
+   is modeled after the Prod<> reducer and wraps join(dest, src) with
+   convenient operator*=, etc. Note the addition of update(ValueType const&
+   rhs) and reset() so that all reducers have common functions (see
+   ReduceDuplicates and ResetDuplicates). */
+template <typename ValueType, typename DeviceType>
+struct ScatterValue<ValueType, Kokkos::Experimental::ScatterProd, DeviceType,
+                    Kokkos::Experimental::ScatterNonAtomic> {
+  ValueType& value;
+
+ public:
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ValueType& value_in)
+      : value(value_in) {}
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ScatterValue&& other)
+      : value(other.value) {}
+  KOKKOS_FORCEINLINE_FUNCTION void operator*=(ValueType const& rhs) {
+    value *= rhs;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void operator/=(ValueType const& rhs) {
+    value /= rhs;
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION void update(ValueType const& rhs) {
+    value *= rhs;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void reset() {
+    value = reduction_identity<ValueType>::prod();
+  }
+};
+
+/* ScatterValue <Op=ScatterProd, Contribution=ScatterAtomic> is the
+ object returned by the access operator() of ScatterAccess. This class is
+ modeled after the Prod<> reducer and, like the proxy returned by an atomic
+ View, wraps atomic_prod with convenient operator*=, etc. atomic_prod is
+ implemented via atomic_compare_exchange. This version also provides the
+ update(rhs) and reset() functions. */
+template <typename ValueType, typename DeviceType>
+struct ScatterValue<ValueType, Kokkos::Experimental::ScatterProd, DeviceType,
+                    Kokkos::Experimental::ScatterAtomic> {
+  ValueType& value;
+
+ public:
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ValueType& value_in)
+      : value(value_in) {}
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ScatterValue&& other)
+      : value(other.value) {}
+
+  KOKKOS_FORCEINLINE_FUNCTION void operator*=(ValueType const& rhs) {
+    Kokkos::atomic_mul(&value, rhs);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void operator/=(ValueType const& rhs) {
+    Kokkos::atomic_div(&value, rhs);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void join(ValueType& dest, const ValueType& src) const {
+    atomic_prod(&dest, src);
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION void update(ValueType const& rhs) {
+    atomic_prod(&value, rhs);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void reset() {
+    value = reduction_identity<ValueType>::prod();
+  }
+};
+
+/* ScatterValue <Op=ScatterMin, Contribution=ScatterNonAtomic> is
+   the object returned by the access operator() of ScatterAccess. This class
+   is modeled after the Min<> reducer and wraps join(dest, src) with a
+   convenient update(rhs). Note the addition of update(ValueType const& rhs)
+   and reset() so that all reducers have a common update function (see
+   ReduceDuplicates and ResetDuplicates). */
+template <typename ValueType, typename DeviceType>
+struct ScatterValue<ValueType, Kokkos::Experimental::ScatterMin, DeviceType,
+                    Kokkos::Experimental::ScatterNonAtomic> {
+  ValueType& value;
+
+ public:
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ValueType& value_in)
+      : value(value_in) {}
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ScatterValue&& other)
+      : value(other.value) {}
+  KOKKOS_FORCEINLINE_FUNCTION void update(ValueType const& rhs) {
+    value = rhs < value ? rhs : value;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void reset() {
+    value = reduction_identity<ValueType>::min();
+  }
+};
+
+/* ScatterValue <Op=ScatterMin, Contribution=ScatterAtomic> is the
+   object returned by the access operator() of ScatterAccess. This class is
+   modeled after the Min<> reducer and, like the proxy returned by an atomic
+   View, wraps atomic_min with join(), etc. atomic_min is implemented via
+   atomic_compare_exchange. This version also provides the update(rhs) and
+   reset() functions. */
+template <typename ValueType, typename DeviceType>
+struct ScatterValue<ValueType, Kokkos::Experimental::ScatterMin, DeviceType,
+                    Kokkos::Experimental::ScatterAtomic> {
+  ValueType& value;
+
+ public:
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ValueType& value_in)
+      : value(value_in) {}
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ScatterValue&& other)
+      : value(other.value) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void join(ValueType& dest, const ValueType& src) const {
+    atomic_min(&dest, src);
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION void update(ValueType const& rhs) {
+    this->join(value, rhs);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void reset() {
+    value = reduction_identity<ValueType>::min();
+  }
+};
+
+/* ScatterValue <Op=ScatterMax, Contribution=ScatterNonAtomic> is
+   the object returned by the access operator() of ScatterAccess. This class
+   is modeled after the Max<> reducer and wraps join(dest, src) with a
+   convenient update(rhs). Note the addition of update(ValueType const& rhs)
+   and reset() so that all reducers have a common update function (see
+   ReduceDuplicates and ResetDuplicates). */
+template <typename ValueType, typename DeviceType>
+struct ScatterValue<ValueType, Kokkos::Experimental::ScatterMax, DeviceType,
+                    Kokkos::Experimental::ScatterNonAtomic> {
+  ValueType& value;
+
+ public:
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ValueType& value_in)
+      : value(value_in) {}
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ScatterValue&& other)
+      : value(other.value) {}
+  KOKKOS_FORCEINLINE_FUNCTION void update(ValueType const& rhs) {
+    value = rhs > value ? rhs : value;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void reset() {
+    value = reduction_identity<ValueType>::max();
+  }
+};
+
+/* ScatterValue <Op=ScatterMax, Contribution=ScatterAtomic> is the
+   object returned by the access operator() of ScatterAccess. This class is
+   modeled after the Max<> reducer and, like the proxy returned by an atomic
+   View, wraps atomic_max with join(), etc. atomic_max is implemented via
+   atomic_compare_exchange. This version also provides the update(rhs) and
+   reset() functions. */
+template <typename ValueType, typename DeviceType>
+struct ScatterValue<ValueType, Kokkos::Experimental::ScatterMax, DeviceType,
+                    Kokkos::Experimental::ScatterAtomic> {
+  ValueType& value;
+
+ public:
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ValueType& value_in)
+      : value(value_in) {}
+  KOKKOS_FORCEINLINE_FUNCTION ScatterValue(ScatterValue&& other)
+      : value(other.value) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void join(ValueType& dest, const ValueType& src) const {
+    atomic_max(&dest, src);
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION void update(ValueType const& rhs) {
+    this->join(value, rhs);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void reset() {
+    value = reduction_identity<ValueType>::max();
+  }
+};
+
+/* DuplicatedDataType, given a View DataType, creates a new DataType that
+   has an additional runtime dimension which becomes the largest-stride
+   dimension. For LayoutLeft the largest-stride dimension is the rightmost
+   one, and since the DataType grammar requires runtime dimensions to precede
+   compile-time ones, any existing compile-time dimensions must be converted
+   into runtime dimensions. */
+template <typename T, typename Layout>
+struct DuplicatedDataType;
+
+template <typename T>
+struct DuplicatedDataType<T, Kokkos::LayoutRight> {
+  using value_type = T*;  // For LayoutRight, add a star all the way on the left
+};
+
+template <typename T, size_t N>
+struct DuplicatedDataType<T[N], Kokkos::LayoutRight> {
+  using value_type =
+      typename DuplicatedDataType<T, Kokkos::LayoutRight>::value_type[N];
+};
+
+template <typename T>
+struct DuplicatedDataType<T[], Kokkos::LayoutRight> {
+  using value_type =
+      typename DuplicatedDataType<T, Kokkos::LayoutRight>::value_type[];
+};
+
+template <typename T>
+struct DuplicatedDataType<T*, Kokkos::LayoutRight> {
+  using value_type =
+      typename DuplicatedDataType<T, Kokkos::LayoutRight>::value_type*;
+};
+
+template <typename T>
+struct DuplicatedDataType<T, Kokkos::LayoutLeft> {
+  using value_type = T*;
+};
+
+template <typename T, size_t N>
+struct DuplicatedDataType<T[N], Kokkos::LayoutLeft> {
+  using value_type =
+      typename DuplicatedDataType<T, Kokkos::LayoutLeft>::value_type*;
+};
+
+template <typename T>
+struct DuplicatedDataType<T[], Kokkos::LayoutLeft> {
+  using value_type =
+      typename DuplicatedDataType<T, Kokkos::LayoutLeft>::value_type*;
+};
+
+template <typename T>
+struct DuplicatedDataType<T*, Kokkos::LayoutLeft> {
+  using value_type =
+      typename DuplicatedDataType<T, Kokkos::LayoutLeft>::value_type*;
+};
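+
+// Illustrative examples of the resulting data types (derived from the
+// specializations above):
+//   DuplicatedDataType<double*[3], Kokkos::LayoutRight>::value_type
+//       == double**[3]  // one new runtime dimension, leftmost
+//   DuplicatedDataType<double*[3], Kokkos::LayoutLeft>::value_type
+//       == double***    // the compile-time [3] becomes runtime as well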
+
+/* Insert integer argument pack into array */
+
+template <class T>
+void args_to_array(size_t* array, int pos, T dim0) {
+  array[pos] = dim0;
+}
+template <class T, class... Dims>
+void args_to_array(size_t* array, int pos, T dim0, Dims... dims) {
+  array[pos] = dim0;
+  args_to_array(array, pos + 1, dims...);
+}
+
+/* Slice is responsible for supplying the correct number of Kokkos::ALL
+   arguments on the correct side of the index in a call to subview(), so that
+   the specified index selects the largest-stride dimension. */
+template <typename Layout, int rank, typename V, typename... Args>
+struct Slice {
+  using next       = Slice<Layout, rank - 1, V, Kokkos::Impl::ALL_t, Args...>;
+  using value_type = typename next::value_type;
+
+  static value_type get(V const& src, const size_t i, Args... args) {
+    return next::get(src, i, Kokkos::ALL, args...);
+  }
+};
+
+template <typename V, typename... Args>
+struct Slice<Kokkos::LayoutRight, 1, V, Args...> {
+  using value_type =
+      typename Kokkos::Impl::ViewMapping<void, V, const size_t, Args...>::type;
+  static value_type get(V const& src, const size_t i, Args... args) {
+    return Kokkos::subview(src, i, args...);
+  }
+};
+
+template <typename V, typename... Args>
+struct Slice<Kokkos::LayoutLeft, 1, V, Args...> {
+  using value_type =
+      typename Kokkos::Impl::ViewMapping<void, V, Args..., const size_t>::type;
+  static value_type get(V const& src, const size_t i, Args... args) {
+    return Kokkos::subview(src, args..., i);
+  }
+};
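+
+// Illustrative: for a rank-3 view type V,
+//   Slice<Kokkos::LayoutRight, 3, V>::get(v, i)
+//       == Kokkos::subview(v, i, Kokkos::ALL, Kokkos::ALL)
+//   Slice<Kokkos::LayoutLeft, 3, V>::get(v, i)
+//       == Kokkos::subview(v, Kokkos::ALL, Kokkos::ALL, i)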
+
+template <typename ExecSpace, typename ValueType, typename Op>
+struct ReduceDuplicates;
+
+template <typename ExecSpace, typename ValueType, typename Op>
+struct ReduceDuplicatesBase {
+  using Derived = ReduceDuplicates<ExecSpace, ValueType, Op>;
+  ValueType const* src;
+  ValueType* dst;
+  size_t stride;
+  size_t start;
+  size_t n;
+  ReduceDuplicatesBase(ExecSpace const& exec_space, ValueType const* src_in,
+                       ValueType* dest_in, size_t stride_in, size_t start_in,
+                       size_t n_in, std::string const& name)
+      : src(src_in), dst(dest_in), stride(stride_in), start(start_in), n(n_in) {
+    parallel_for(
+        std::string("Kokkos::ScatterView::ReduceDuplicates [") + name + "]",
+        RangePolicy<ExecSpace, size_t>(exec_space, 0, stride),
+        static_cast<Derived const&>(*this));
+  }
+};
+
+/* ReduceDuplicates -- Perform a reduction on the destination array using a
+ * strided source. Uses the ScatterValue<> specific to the operation to wrap
+ * the destination array so that the reduction can be applied via its
+ * update(rhs) function. */
+template <typename ExecSpace, typename ValueType, typename Op>
+struct ReduceDuplicates
+    : public ReduceDuplicatesBase<ExecSpace, ValueType, Op> {
+  using Base = ReduceDuplicatesBase<ExecSpace, ValueType, Op>;
+  ReduceDuplicates(ExecSpace const& exec_space, ValueType const* src_in,
+                   ValueType* dst_in, size_t stride_in, size_t start_in,
+                   size_t n_in, std::string const& name)
+      : Base(exec_space, src_in, dst_in, stride_in, start_in, n_in, name) {}
+  KOKKOS_FORCEINLINE_FUNCTION void operator()(size_t i) const {
+    for (size_t j = Base::start; j < Base::n; ++j) {
+      ScatterValue<ValueType, Op, ExecSpace,
+                   Kokkos::Experimental::ScatterNonAtomic>
+          sv(Base::dst[i]);
+      sv.update(Base::src[i + Base::stride * j]);
+    }
+  }
+};
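+
+// Effectively, as illustrative pseudocode, for each destination index i in
+// [0, stride):
+//   for (j = start; j < n; ++j) dst[i] = Op(dst[i], src[i + stride * j]);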
+
+template <typename ExecSpace, typename ValueType, typename Op>
+struct ResetDuplicates;
+
+template <typename ExecSpace, typename ValueType, typename Op>
+struct ResetDuplicatesBase {
+  using Derived = ResetDuplicates<ExecSpace, ValueType, Op>;
+  ValueType* data;
+  ResetDuplicatesBase(ExecSpace const& exec_space, ValueType* data_in,
+                      size_t size_in, std::string const& name)
+      : data(data_in) {
+    parallel_for(
+        std::string("Kokkos::ScatterView::ResetDuplicates [") + name + "]",
+        RangePolicy<ExecSpace, size_t>(exec_space, 0, size_in),
+        static_cast<Derived const&>(*this));
+  }
+};
+
+/* ResetDuplicates -- Reset the destination array to the reduction identity.
+ *    Uses the ScatterValue<> specific to the operation to wrap the
+ *    destination array so that the reset can be applied via its reset()
+ *    function. */
+template <typename ExecSpace, typename ValueType, typename Op>
+struct ResetDuplicates : public ResetDuplicatesBase<ExecSpace, ValueType, Op> {
+  using Base = ResetDuplicatesBase<ExecSpace, ValueType, Op>;
+  ResetDuplicates(ExecSpace const& exec_space, ValueType* data_in,
+                  size_t size_in, std::string const& name)
+      : Base(exec_space, data_in, size_in, name) {}
+  KOKKOS_FORCEINLINE_FUNCTION void operator()(size_t i) const {
+    ScatterValue<ValueType, Op, ExecSpace,
+                 Kokkos::Experimental::ScatterNonAtomic>
+        sv(Base::data[i]);
+    sv.reset();
+  }
+};
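+
+// Effectively (illustrative): data[i] is set to the reduction identity of Op
+// (e.g. 0 for ScatterSum, 1 for ScatterProd) for each i in [0, size_in).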
+
+template <typename... P>
+void check_scatter_view_allocation_properties_argument(
+    ViewCtorProp<P...> const&) {
+  static_assert(ViewCtorProp<P...>::has_execution_space &&
+                    ViewCtorProp<P...>::has_label &&
+                    ViewCtorProp<P...>::initialize,
+                "Allocation property must have an execution name as well as a "
+                "label, and must perform the view initialization");
+}
+
+}  // namespace Experimental
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Experimental {
+
+template <typename DataType,
+          typename Layout      = Kokkos::DefaultExecutionSpace::array_layout,
+          typename DeviceType  = Kokkos::DefaultExecutionSpace,
+          typename Op          = Kokkos::Experimental::ScatterSum,
+          typename Duplication = typename Kokkos::Impl::Experimental::
+              DefaultDuplication<typename DeviceType::execution_space>::type,
+          typename Contribution =
+              typename Kokkos::Impl::Experimental::DefaultContribution<
+                  typename DeviceType::execution_space, Duplication>::type>
+class ScatterView;
+
+template <typename DataType, typename Op, typename DeviceType, typename Layout,
+          typename Duplication, typename Contribution,
+          typename OverrideContribution>
+class ScatterAccess;
+
+// non-duplicated implementation
+template <typename DataType, typename Op, typename DeviceType, typename Layout,
+          typename Contribution>
+class ScatterView<DataType, Layout, DeviceType, Op, ScatterNonDuplicated,
+                  Contribution> {
+ public:
+  using execution_space         = typename DeviceType::execution_space;
+  using memory_space            = typename DeviceType::memory_space;
+  using device_type             = Kokkos::Device<execution_space, memory_space>;
+  using original_view_type      = Kokkos::View<DataType, Layout, device_type>;
+  using original_value_type     = typename original_view_type::value_type;
+  using original_reference_type = typename original_view_type::reference_type;
+  friend class ScatterAccess<DataType, Op, DeviceType, Layout,
+                             ScatterNonDuplicated, Contribution,
+                             ScatterNonAtomic>;
+  friend class ScatterAccess<DataType, Op, DeviceType, Layout,
+                             ScatterNonDuplicated, Contribution, ScatterAtomic>;
+  template <class, class, class, class, class, class>
+  friend class ScatterView;
+
+  ScatterView() = default;
+
+  template <typename RT, typename... RP>
+  ScatterView(View<RT, RP...> const& original_view)
+      : internal_view(original_view) {}
+
+  template <typename RT, typename... P, typename... RP>
+  ScatterView(execution_space const& /* exec_space */,
+              View<RT, RP...> const& original_view)
+      : internal_view(original_view) {}
+
+  template <typename... Dims>
+  ScatterView(std::string const& name, Dims... dims)
+      : internal_view(name, dims...) {}
+
+  // This overload allows specifying an execution space instance to be
+  // used by passing, e.g., Kokkos::view_alloc(exec_space, "label") as
+  // first argument.
+  template <typename... P, typename... Dims>
+  ScatterView(::Kokkos::Impl::ViewCtorProp<P...> const& arg_prop, Dims... dims)
+      : internal_view(arg_prop, dims...) {
+    using ::Kokkos::Impl::Experimental::
+        check_scatter_view_allocation_properties_argument;
+    check_scatter_view_allocation_properties_argument(arg_prop);
+  }
+
+  template <typename OtherDataType, typename OtherDeviceType>
+  KOKKOS_FUNCTION ScatterView(
+      const ScatterView<OtherDataType, Layout, OtherDeviceType, Op,
+                        ScatterNonDuplicated, Contribution>& other_view)
+      : internal_view(other_view.internal_view) {}
+
+  template <typename OtherDataType, typename OtherDeviceType>
+  KOKKOS_FUNCTION void operator=(
+      const ScatterView<OtherDataType, Layout, OtherDeviceType, Op,
+                        ScatterNonDuplicated, Contribution>& other_view) {
+    internal_view = other_view.internal_view;
+  }
+
+  template <typename OverrideContribution = Contribution>
+  KOKKOS_FORCEINLINE_FUNCTION
+      ScatterAccess<DataType, Op, DeviceType, Layout, ScatterNonDuplicated,
+                    Contribution, OverrideContribution>
+      access() const {
+    return ScatterAccess<DataType, Op, DeviceType, Layout, ScatterNonDuplicated,
+                         Contribution, OverrideContribution>(*this);
+  }
+
+  original_view_type subview() const { return internal_view; }
+
+  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
+    return internal_view.is_allocated();
+  }
+
+  template <typename DT, typename... RP>
+  void contribute_into(View<DT, RP...> const& dest) const {
+    contribute_into(execution_space(), dest);
+  }
+
+  template <typename DT, typename... RP>
+  void contribute_into(execution_space const& exec_space,
+                       View<DT, RP...> const& dest) const {
+    using dest_type = View<DT, RP...>;
+    static_assert(std::is_same<typename dest_type::array_layout, Layout>::value,
+                  "ScatterView contribute destination has different layout");
+    static_assert(
+        Kokkos::SpaceAccessibility<
+            execution_space, typename dest_type::memory_space>::accessible,
+        "ScatterView contribute destination memory space not accessible");
+    if (dest.data() == internal_view.data()) return;
+    Kokkos::Impl::Experimental::ReduceDuplicates<execution_space,
+                                                 original_value_type, Op>(
+        exec_space, internal_view.data(), dest.data(), 0, 0, 1,
+        internal_view.label());
+  }
+
+  void reset(execution_space const& exec_space = execution_space()) {
+    Kokkos::Impl::Experimental::ResetDuplicates<execution_space,
+                                                original_value_type, Op>(
+        exec_space, internal_view.data(), internal_view.size(),
+        internal_view.label());
+  }
+  template <typename DT, typename... RP>
+  void reset_except(View<DT, RP...> const& view) {
+    reset_except(execution_space(), view);
+  }
+
+  template <typename DT, typename... RP>
+  void reset_except(const execution_space& exec_space,
+                    View<DT, RP...> const& view) {
+    if (view.data() != internal_view.data()) reset(exec_space);
+  }
+
+  void resize(const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    ::Kokkos::resize(internal_view, n0, n1, n2, n3, n4, n5, n6, n7);
+  }
+
+  template <class... ViewCtorArgs>
+  void resize(const ::Kokkos::Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+              const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    ::Kokkos::resize(arg_prop, internal_view, n0, n1, n2, n3, n4, n5, n6, n7);
+  }
+
+  template <class I>
+  std::enable_if_t<Kokkos::Impl::is_view_ctor_property<I>::value> resize(
+      const I& arg_prop, const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    ::Kokkos::resize(arg_prop, internal_view, n0, n1, n2, n3, n4, n5, n6, n7);
+  }
+
+  template <class... ViewCtorArgs>
+  void realloc(const Kokkos::Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+               const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    ::Kokkos::realloc(arg_prop, internal_view, n0, n1, n2, n3, n4, n5, n6, n7);
+  }
+
+  void realloc(const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    ::Kokkos::realloc(internal_view, n0, n1, n2, n3, n4, n5, n6, n7);
+  }
+
+  template <class I>
+  std::enable_if_t<Kokkos::Impl::is_view_ctor_property<I>::value> realloc(
+      const I& arg_prop, const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    ::Kokkos::realloc(arg_prop, internal_view, n0, n1, n2, n3, n4, n5, n6, n7);
+  }
+
+ protected:
+  template <typename... Args>
+  KOKKOS_FORCEINLINE_FUNCTION original_reference_type at(Args... args) const {
+    return internal_view(args...);
+  }
+
+ private:
+  using internal_view_type = original_view_type;
+  internal_view_type internal_view;
+};
+
+template <typename DataType, typename Op, typename DeviceType, typename Layout,
+          typename Contribution, typename OverrideContribution>
+class ScatterAccess<DataType, Op, DeviceType, Layout, ScatterNonDuplicated,
+                    Contribution, OverrideContribution> {
+ public:
+  using view_type           = ScatterView<DataType, Layout, DeviceType, Op,
+                                ScatterNonDuplicated, Contribution>;
+  using original_value_type = typename view_type::original_value_type;
+  using value_type          = Kokkos::Impl::Experimental::ScatterValue<
+      original_value_type, Op, DeviceType, OverrideContribution>;
+
+  KOKKOS_INLINE_FUNCTION
+  ScatterAccess() : view(view_type()) {}
+
+  KOKKOS_INLINE_FUNCTION
+  ScatterAccess(view_type const& view_in) : view(view_in) {}
+  KOKKOS_DEFAULTED_FUNCTION
+  ~ScatterAccess() = default;
+
+  template <typename... Args>
+  KOKKOS_FORCEINLINE_FUNCTION value_type operator()(Args... args) const {
+    return view.at(args...);
+  }
+
+  template <typename Arg>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      view_type::original_view_type::rank == 1 && std::is_integral<Arg>::value,
+      value_type>
+  operator[](Arg arg) const {
+    return view.at(arg);
+  }
+
+ private:
+  view_type const& view;
+};
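+
+// Illustrative: given "auto access = scatter.access();", an expression such
+// as "access(i) += x" constructs a ScatterValue proxy around the element
+// internal_view(i) and forwards to its update(x), which contributes either
+// plainly or atomically depending on OverrideContribution.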
+
+// duplicated implementation
+// LayoutLeft and LayoutRight are different enough that we'll just specialize
+// each
+
+template <typename DataType, typename Op, typename DeviceType,
+          typename Contribution>
+class ScatterView<DataType, Kokkos::LayoutRight, DeviceType, Op,
+                  ScatterDuplicated, Contribution> {
+ public:
+  using execution_space = typename DeviceType::execution_space;
+  using memory_space    = typename DeviceType::memory_space;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+  using original_view_type =
+      Kokkos::View<DataType, Kokkos::LayoutRight, device_type>;
+  using original_value_type     = typename original_view_type::value_type;
+  using original_reference_type = typename original_view_type::reference_type;
+  friend class ScatterAccess<DataType, Op, DeviceType, Kokkos::LayoutRight,
+                             ScatterDuplicated, Contribution, ScatterNonAtomic>;
+  friend class ScatterAccess<DataType, Op, DeviceType, Kokkos::LayoutRight,
+                             ScatterDuplicated, Contribution, ScatterAtomic>;
+  template <class, class, class, class, class, class>
+  friend class ScatterView;
+
+  using data_type_info =
+      typename Kokkos::Impl::Experimental::DuplicatedDataType<
+          DataType, Kokkos::LayoutRight>;
+  using internal_data_type = typename data_type_info::value_type;
+  using internal_view_type =
+      Kokkos::View<internal_data_type, Kokkos::LayoutRight, device_type>;
+
+  ScatterView() = default;
+
+  template <typename OtherDataType, typename OtherDeviceType>
+  KOKKOS_FUNCTION ScatterView(
+      const ScatterView<OtherDataType, Kokkos::LayoutRight, OtherDeviceType, Op,
+                        ScatterDuplicated, Contribution>& other_view)
+      : unique_token(other_view.unique_token),
+        internal_view(other_view.internal_view) {}
+
+  template <typename OtherDataType, typename OtherDeviceType>
+  KOKKOS_FUNCTION void operator=(
+      const ScatterView<OtherDataType, Kokkos::LayoutRight, OtherDeviceType, Op,
+                        ScatterDuplicated, Contribution>& other_view) {
+    unique_token  = other_view.unique_token;
+    internal_view = other_view.internal_view;
+  }
+
+  template <typename RT, typename... RP>
+  ScatterView(View<RT, RP...> const& original_view)
+      : ScatterView(execution_space(), original_view) {}
+
+  template <typename RT, typename... P, typename... RP>
+  ScatterView(execution_space const& exec_space,
+              View<RT, RP...> const& original_view)
+      : unique_token(),
+        internal_view(
+            view_alloc(WithoutInitializing,
+                       std::string("duplicated_") + original_view.label(),
+                       exec_space),
+            unique_token.size(),
+            original_view.rank_dynamic > 0 ? original_view.extent(0)
+                                           : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            original_view.rank_dynamic > 1 ? original_view.extent(1)
+                                           : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            original_view.rank_dynamic > 2 ? original_view.extent(2)
+                                           : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            original_view.rank_dynamic > 3 ? original_view.extent(3)
+                                           : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            original_view.rank_dynamic > 4 ? original_view.extent(4)
+                                           : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            original_view.rank_dynamic > 5 ? original_view.extent(5)
+                                           : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+            original_view.rank_dynamic > 6 ? original_view.extent(6)
+                                           : KOKKOS_IMPL_CTOR_DEFAULT_ARG)
+  {
+    reset(exec_space);
+  }
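+
+  // Illustrative: wrapping an original view of extents (N0, N1) allocates
+  // internal storage of extents (unique_token.size(), N0, N1), i.e. one
+  // duplicate of the original array per unique token, reset to the
+  // reduction identity.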
+
+  template <typename... Dims>
+  ScatterView(std::string const& name, Dims... dims)
+      : ScatterView(view_alloc(execution_space(), name), dims...) {}
+
+  // This overload allows specifying an execution space instance to be
+  // used by passing, e.g., Kokkos::view_alloc(exec_space, "label") as
+  // first argument.
+  template <typename... P, typename... Dims>
+  ScatterView(::Kokkos::Impl::ViewCtorProp<P...> const& arg_prop, Dims... dims)
+      : internal_view(view_alloc(WithoutInitializing,
+                                 static_cast<::Kokkos::Impl::ViewCtorProp<
+                                     void, std::string> const&>(arg_prop)
+                                     .value),
+                      unique_token.size(), dims...) {
+    using ::Kokkos::Impl::Experimental::
+        check_scatter_view_allocation_properties_argument;
+    check_scatter_view_allocation_properties_argument(arg_prop);
+
+    auto const exec_space =
+        static_cast<::Kokkos::Impl::ViewCtorProp<void, execution_space> const&>(
+            arg_prop)
+            .value;
+    reset(exec_space);
+  }
+
+  template <typename OverrideContribution = Contribution>
+  KOKKOS_FORCEINLINE_FUNCTION
+      ScatterAccess<DataType, Op, DeviceType, Kokkos::LayoutRight,
+                    ScatterDuplicated, Contribution, OverrideContribution>
+      access() const {
+    return ScatterAccess<DataType, Op, DeviceType, Kokkos::LayoutRight,
+                         ScatterDuplicated, Contribution, OverrideContribution>(
+        *this);
+  }
+
+  typename Kokkos::Impl::Experimental::Slice<Kokkos::LayoutRight,
+                                             internal_view_type::rank,
+                                             internal_view_type>::value_type
+  subview() const {
+    return Kokkos::Impl::Experimental::Slice<
+        Kokkos::LayoutRight, internal_view_type::rank,
+        internal_view_type>::get(internal_view, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
+    return internal_view.is_allocated();
+  }
+
+  template <typename DT, typename... RP>
+  void contribute_into(View<DT, RP...> const& dest) const {
+    contribute_into(execution_space(), dest);
+  }
+
+  template <typename DT, typename... RP>
+  void contribute_into(execution_space const& exec_space,
+                       View<DT, RP...> const& dest) const {
+    using dest_type = View<DT, RP...>;
+    static_assert(std::is_same<typename dest_type::array_layout,
+                               Kokkos::LayoutRight>::value,
+                  "ScatterView contribute destination has different layout");
+    static_assert(
+        Kokkos::SpaceAccessibility<
+            execution_space, typename dest_type::memory_space>::accessible,
+        "ScatterView contribute destination memory space not accessible");
+    bool is_equal = (dest.data() == internal_view.data());
+    size_t start  = is_equal ? 1 : 0;
+    Kokkos::Impl::Experimental::ReduceDuplicates<execution_space,
+                                                 original_value_type, Op>(
+        exec_space, internal_view.data(), dest.data(), internal_view.stride(0),
+        start, internal_view.extent(0), internal_view.label());
+  }
+
+  void reset(execution_space const& exec_space = execution_space()) {
+    Kokkos::Impl::Experimental::ResetDuplicates<execution_space,
+                                                original_value_type, Op>(
+        exec_space, internal_view.data(), internal_view.size(),
+        internal_view.label());
+  }
+
+  template <typename DT, typename... RP>
+  void reset_except(View<DT, RP...> const& view) {
+    reset_except(execution_space(), view);
+  }
+
+  template <typename DT, typename... RP>
+  void reset_except(execution_space const& exec_space,
+                    View<DT, RP...> const& view) {
+    if (view.data() != internal_view.data()) {
+      reset(exec_space);
+      return;
+    }
+    Kokkos::Impl::Experimental::ResetDuplicates<execution_space,
+                                                original_value_type, Op>(
+        exec_space, internal_view.data() + view.size(),
+        internal_view.size() - view.size(), internal_view.label());
+  }
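+
+  // Illustrative: if "view" is the duplicate-0 slice returned by subview()
+  // (same data pointer), only duplicates 1 .. unique_token.size()-1 are
+  // reset and the caller's slice is left intact.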
+
+  void resize(const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    ::Kokkos::resize(internal_view, unique_token.size(), n0, n1, n2, n3, n4, n5,
+                     n6);
+  }
+
+  template <class... ViewCtorArgs>
+  void resize(const ::Kokkos::Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+              const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    ::Kokkos::resize(arg_prop, internal_view, unique_token.size(), n0, n1, n2,
+                     n3, n4, n5, n6);
+  }
+
+  template <class I>
+  std::enable_if_t<Kokkos::Impl::is_view_ctor_property<I>::value> resize(
+      const I& arg_prop, const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    ::Kokkos::resize(arg_prop, internal_view, unique_token.size(), n0, n1, n2,
+                     n3, n4, n5, n6);
+  }
+
+  template <class... ViewCtorArgs>
+  void realloc(const ::Kokkos::Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+               const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    ::Kokkos::realloc(arg_prop, internal_view, unique_token.size(), n0, n1, n2,
+                      n3, n4, n5, n6);
+  }
+
+  void realloc(const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    ::Kokkos::realloc(internal_view, unique_token.size(), n0, n1, n2, n3, n4,
+                      n5, n6);
+  }
+
+  template <class I>
+  std::enable_if_t<Kokkos::Impl::is_view_ctor_property<I>::value> realloc(
+      const I& arg_prop, const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    ::Kokkos::realloc(arg_prop, internal_view, unique_token.size(), n0, n1, n2,
+                      n3, n4, n5, n6);
+  }
+
+ protected:
+  template <typename... Args>
+  KOKKOS_FORCEINLINE_FUNCTION original_reference_type at(int rank,
+                                                         Args... args) const {
+    return internal_view(rank, args...);
+  }
+
+ protected:
+  using unique_token_type = Kokkos::Experimental::UniqueToken<
+      execution_space, Kokkos::Experimental::UniqueTokenScope::Global>;
+
+  unique_token_type unique_token;
+  internal_view_type internal_view;
+};
+
+template <typename DataType, typename Op, typename DeviceType,
+          typename Contribution>
+class ScatterView<DataType, Kokkos::LayoutLeft, DeviceType, Op,
+                  ScatterDuplicated, Contribution> {
+ public:
+  using execution_space = typename DeviceType::execution_space;
+  using memory_space    = typename DeviceType::memory_space;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+  using original_view_type =
+      Kokkos::View<DataType, Kokkos::LayoutLeft, device_type>;
+  using original_value_type     = typename original_view_type::value_type;
+  using original_reference_type = typename original_view_type::reference_type;
+  friend class ScatterAccess<DataType, Op, DeviceType, Kokkos::LayoutLeft,
+                             ScatterDuplicated, Contribution, ScatterNonAtomic>;
+  friend class ScatterAccess<DataType, Op, DeviceType, Kokkos::LayoutLeft,
+                             ScatterDuplicated, Contribution, ScatterAtomic>;
+  template <class, class, class, class, class, class>
+  friend class ScatterView;
+
+  using data_type_info =
+      typename Kokkos::Impl::Experimental::DuplicatedDataType<
+          DataType, Kokkos::LayoutLeft>;
+  using internal_data_type = typename data_type_info::value_type;
+  using internal_view_type =
+      Kokkos::View<internal_data_type, Kokkos::LayoutLeft, device_type>;
+
+  ScatterView() = default;
+
+  template <typename RT, typename... RP>
+  ScatterView(View<RT, RP...> const& original_view)
+      : ScatterView(execution_space(), original_view) {}
+
+  template <typename RT, typename... P, typename... RP>
+  ScatterView(execution_space const& exec_space,
+              View<RT, RP...> const& original_view)
+      : unique_token() {
+    size_t arg_N[8] = {original_view.rank > 0 ? original_view.extent(0)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       original_view.rank > 1 ? original_view.extent(1)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       original_view.rank > 2 ? original_view.extent(2)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       original_view.rank > 3 ? original_view.extent(3)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       original_view.rank > 4 ? original_view.extent(4)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       original_view.rank > 5 ? original_view.extent(5)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       original_view.rank > 6 ? original_view.extent(6)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       KOKKOS_IMPL_CTOR_DEFAULT_ARG};
+    arg_N[internal_view_type::rank - 1] = unique_token.size();
+    internal_view                       = internal_view_type(
+        view_alloc(WithoutInitializing,
+                   std::string("duplicated_") + original_view.label(),
+                   exec_space),
+        arg_N[0], arg_N[1], arg_N[2], arg_N[3], arg_N[4], arg_N[5], arg_N[6],
+        arg_N[7]);
+    reset(exec_space);
+  }
+
+  template <typename... Dims>
+  ScatterView(std::string const& name, Dims... dims)
+      : ScatterView(view_alloc(execution_space(), name), dims...) {}
+
+  // This overload allows specifying an execution space instance to be
+  // used by passing, e.g., Kokkos::view_alloc(exec_space, "label") as
+  // first argument.
+  template <typename... P, typename... Dims>
+  ScatterView(::Kokkos::Impl::ViewCtorProp<P...> const& arg_prop,
+              Dims... dims) {
+    using ::Kokkos::Impl::Experimental::
+        check_scatter_view_allocation_properties_argument;
+    check_scatter_view_allocation_properties_argument(arg_prop);
+
+    original_view_type original_view;
+    size_t arg_N[8] = {original_view.rank > 0 ? original_view.static_extent(0)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       original_view.rank > 1 ? original_view.static_extent(1)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       original_view.rank > 2 ? original_view.static_extent(2)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       original_view.rank > 3 ? original_view.static_extent(3)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       original_view.rank > 4 ? original_view.static_extent(4)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       original_view.rank > 5 ? original_view.static_extent(5)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       original_view.rank > 6 ? original_view.static_extent(6)
+                                              : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                       KOKKOS_IMPL_CTOR_DEFAULT_ARG};
+    Kokkos::Impl::Experimental::args_to_array(arg_N, 0, dims...);
+    arg_N[internal_view_type::rank - 1] = unique_token.size();
+
+    auto const name =
+        static_cast<::Kokkos::Impl::ViewCtorProp<void, std::string> const&>(
+            arg_prop)
+            .value;
+    internal_view = internal_view_type(view_alloc(WithoutInitializing, name),
+                                       arg_N[0], arg_N[1], arg_N[2], arg_N[3],
+                                       arg_N[4], arg_N[5], arg_N[6], arg_N[7]);
+
+    auto const exec_space =
+        static_cast<::Kokkos::Impl::ViewCtorProp<void, execution_space> const&>(
+            arg_prop)
+            .value;
+    reset(exec_space);
+  }
+
+  template <typename OtherDataType, typename OtherDeviceType>
+  KOKKOS_FUNCTION ScatterView(
+      const ScatterView<OtherDataType, Kokkos::LayoutLeft, OtherDeviceType, Op,
+                        ScatterDuplicated, Contribution>& other_view)
+      : unique_token(other_view.unique_token),
+        internal_view(other_view.internal_view) {}
+
+  template <typename OtherDataType, typename OtherDeviceType>
+  KOKKOS_FUNCTION void operator=(
+      const ScatterView<OtherDataType, Kokkos::LayoutLeft, OtherDeviceType, Op,
+                        ScatterDuplicated, Contribution>& other_view) {
+    unique_token  = other_view.unique_token;
+    internal_view = other_view.internal_view;
+  }
+
+  template <typename OverrideContribution = Contribution>
+  KOKKOS_FORCEINLINE_FUNCTION
+      ScatterAccess<DataType, Op, DeviceType, Kokkos::LayoutLeft,
+                    ScatterDuplicated, Contribution, OverrideContribution>
+      access() const {
+    return ScatterAccess<DataType, Op, DeviceType, Kokkos::LayoutLeft,
+                         ScatterDuplicated, Contribution, OverrideContribution>(
+        *this);
+  }
+
+  typename Kokkos::Impl::Experimental::Slice<Kokkos::LayoutLeft,
+                                             internal_view_type::rank,
+                                             internal_view_type>::value_type
+  subview() const {
+    return Kokkos::Impl::Experimental::Slice<
+        Kokkos::LayoutLeft, internal_view_type::rank,
+        internal_view_type>::get(internal_view, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
+    return internal_view.is_allocated();
+  }
+
+  template <typename... RP>
+  void contribute_into(View<RP...> const& dest) const {
+    contribute_into(execution_space(), dest);
+  }
+
+  template <typename... RP>
+  void contribute_into(execution_space const& exec_space,
+                       View<RP...> const& dest) const {
+    using dest_type = View<RP...>;
+    static_assert(
+        std::is_same<typename dest_type::value_type,
+                     typename original_view_type::non_const_value_type>::value,
+        "ScatterView deep_copy destination has wrong value_type");
+    static_assert(std::is_same<typename dest_type::array_layout,
+                               Kokkos::LayoutLeft>::value,
+                  "ScatterView deep_copy destination has different layout");
+    static_assert(
+        Kokkos::SpaceAccessibility<
+            execution_space, typename dest_type::memory_space>::accessible,
+        "ScatterView deep_copy destination memory space not accessible");
+    auto extent   = internal_view.extent(internal_view_type::rank - 1);
+    bool is_equal = (dest.data() == internal_view.data());
+    size_t start  = is_equal ? 1 : 0;
+    Kokkos::Impl::Experimental::ReduceDuplicates<execution_space,
+                                                 original_value_type, Op>(
+        exec_space, internal_view.data(), dest.data(),
+        internal_view.stride(internal_view_type::rank - 1), start, extent,
+        internal_view.label());
+  }
+
+  void reset(execution_space const& exec_space = execution_space()) {
+    Kokkos::Impl::Experimental::ResetDuplicates<execution_space,
+                                                original_value_type, Op>(
+        exec_space, internal_view.data(), internal_view.size(),
+        internal_view.label());
+  }
+
+  template <typename DT, typename... RP>
+  void reset_except(View<DT, RP...> const& view) {
+    reset_except(execution_space(), view);
+  }
+
+  template <typename DT, typename... RP>
+  void reset_except(execution_space const& exec_space,
+                    View<DT, RP...> const& view) {
+    if (view.data() != internal_view.data()) {
+      reset(exec_space);
+      return;
+    }
+    Kokkos::Impl::Experimental::ResetDuplicates<execution_space,
+                                                original_value_type, Op>(
+        exec_space, internal_view.data() + view.size(),
+        internal_view.size() - view.size(), internal_view.label());
+  }
+
+  void resize(const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+              const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    size_t arg_N[8] = {n0, n1, n2, n3, n4, n5, n6, 0};
+    const int i     = internal_view.rank - 1;
+    arg_N[i]        = unique_token.size();
+
+    ::Kokkos::resize(internal_view, arg_N[0], arg_N[1], arg_N[2], arg_N[3],
+                     arg_N[4], arg_N[5], arg_N[6], arg_N[7]);
+  }
+
+  void realloc(const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+               const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+    size_t arg_N[8] = {n0, n1, n2, n3, n4, n5, n6, 0};
+    const int i     = internal_view.rank - 1;
+    arg_N[i]        = unique_token.size();
+
+    ::Kokkos::realloc(internal_view, arg_N[0], arg_N[1], arg_N[2], arg_N[3],
+                      arg_N[4], arg_N[5], arg_N[6], arg_N[7]);
+  }
+
+ protected:
+  template <typename... Args>
+  KOKKOS_FORCEINLINE_FUNCTION original_reference_type at(int thread_id,
+                                                         Args... args) const {
+    return internal_view(args..., thread_id);
+  }
+
+ protected:
+  using unique_token_type = Kokkos::Experimental::UniqueToken<
+      execution_space, Kokkos::Experimental::UniqueTokenScope::Global>;
+
+  unique_token_type unique_token;
+  internal_view_type internal_view;
+};
+
+/* This object has to be separate in order to store the thread ID, which cannot
+   be obtained until one is inside a parallel construct, and which may be
+   relatively expensive to obtain at every contribution (it calls a non-inlined
+   function and looks up a thread-local variable).
+   Due to that expense, it is sensible to query the thread ID at most once per
+   parallel iterate (ideally once per thread, but parallel_for doesn't expose
+   that) and then store it in a stack variable.
+   ScatterAccess serves as a non-const object on the stack which can store the
+   thread ID. */
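+
+/* A minimal usage sketch (illustrative only: `counts` and `N` are
+   hypothetical names, and the default execution space is assumed):
+
+     Kokkos::View<int*> counts("counts", 100);
+     auto scatter = Kokkos::Experimental::create_scatter_view(counts);
+     Kokkos::parallel_for(N, KOKKOS_LAMBDA(int i) {
+       auto access = scatter.access();  // acquires the thread ID once
+       access(i % 100) += 1;            // cheap per-contribution updates
+     });
+     Kokkos::Experimental::contribute(counts, scatter);
+*/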
+
+template <typename DataType, typename Op, typename DeviceType, typename Layout,
+          typename Contribution, typename OverrideContribution>
+class ScatterAccess<DataType, Op, DeviceType, Layout, ScatterDuplicated,
+                    Contribution, OverrideContribution> {
+ public:
+  using view_type           = ScatterView<DataType, Layout, DeviceType, Op,
+                                ScatterDuplicated, Contribution>;
+  using original_value_type = typename view_type::original_value_type;
+  using value_type          = Kokkos::Impl::Experimental::ScatterValue<
+      original_value_type, Op, DeviceType, OverrideContribution>;
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  ScatterAccess(view_type const& view_in)
+      : view(view_in), thread_id(view_in.unique_token.acquire()) {}
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  ~ScatterAccess() {
+    if (thread_id != ~thread_id_type(0)) view.unique_token.release(thread_id);
+  }
+
+  template <typename... Args>
+  KOKKOS_FORCEINLINE_FUNCTION value_type operator()(Args... args) const {
+    return view.at(thread_id, args...);
+  }
+
+  template <typename Arg>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      view_type::original_view_type::rank == 1 && std::is_integral<Arg>::value,
+      value_type>
+  operator[](Arg arg) const {
+    return view.at(thread_id, arg);
+  }
+
+ private:
+  view_type const& view;
+
+  // simplify RAII by disallowing copies
+  ScatterAccess(ScatterAccess const& other) = delete;
+  ScatterAccess& operator=(ScatterAccess const& other) = delete;
+  ScatterAccess& operator=(ScatterAccess&& other) = delete;
+
+ public:
+  // we do need to allow moves though, for the common
+  // auto b = a.access();
+  // pattern; that assignment turns into a move constructor call
+  KOKKOS_FORCEINLINE_FUNCTION
+  ScatterAccess(ScatterAccess&& other)
+      : view(other.view), thread_id(other.thread_id) {
+    other.thread_id = ~thread_id_type(0);
+  }
+
+ private:
+  using unique_token_type = typename view_type::unique_token_type;
+  using thread_id_type    = typename unique_token_type::size_type;
+  thread_id_type thread_id;
+};
+
+template <typename Op          = Kokkos::Experimental::ScatterSum,
+          typename Duplication = void, typename Contribution = void,
+          typename RT, typename... RP>
+ScatterView<
+    RT, typename ViewTraits<RT, RP...>::array_layout,
+    typename ViewTraits<RT, RP...>::device_type, Op,
+    std::conditional_t<
+        std::is_void<Duplication>::value,
+        typename Kokkos::Impl::Experimental::DefaultDuplication<
+            typename ViewTraits<RT, RP...>::execution_space>::type,
+        Duplication>,
+    std::conditional_t<
+        std::is_void<Contribution>::value,
+        typename Kokkos::Impl::Experimental::DefaultContribution<
+            typename ViewTraits<RT, RP...>::execution_space,
+            typename std::conditional_t<
+                std::is_void<Duplication>::value,
+                typename Kokkos::Impl::Experimental::DefaultDuplication<
+                    typename ViewTraits<RT, RP...>::execution_space>::type,
+                Duplication>>::type,
+        Contribution>>
+create_scatter_view(View<RT, RP...> const& original_view) {
+  return original_view;  // implicit ScatterView constructor call
+}
+
+template <typename Op, typename RT, typename... RP>
+ScatterView<
+    RT, typename ViewTraits<RT, RP...>::array_layout,
+    typename ViewTraits<RT, RP...>::device_type, Op,
+    typename Kokkos::Impl::Experimental::DefaultDuplication<
+        typename ViewTraits<RT, RP...>::execution_space>::type,
+    typename Kokkos::Impl::Experimental::DefaultContribution<
+        typename ViewTraits<RT, RP...>::execution_space,
+        typename Kokkos::Impl::Experimental::DefaultDuplication<
+            typename ViewTraits<RT, RP...>::execution_space>::type>::type>
+create_scatter_view(Op, View<RT, RP...> const& original_view) {
+  return original_view;  // implicit ScatterView constructor call
+}
+
+template <typename Op, typename Duplication, typename Contribution, typename RT,
+          typename... RP>
+ScatterView<RT, typename ViewTraits<RT, RP...>::array_layout,
+            typename ViewTraits<RT, RP...>::device_type, Op, Duplication,
+            Contribution>
+create_scatter_view(Op, Duplication, Contribution,
+                    View<RT, RP...> const& original_view) {
+  return original_view;  // implicit ScatterView constructor call
+}
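+
+// A sketch of the three overloads (hypothetical view `v`; which
+// duplication/contribution defaults are chosen depends on the backend):
+//
+//   auto s1 = create_scatter_view(v);  // ScatterSum with defaults
+//   auto s2 = create_scatter_view(Kokkos::Experimental::ScatterMax{}, v);
+//   auto s3 = create_scatter_view(Kokkos::Experimental::ScatterMax{},
+//                                 Kokkos::Experimental::ScatterDuplicated{},
+//                                 Kokkos::Experimental::ScatterNonAtomic{}, v);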
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Experimental {
+
+template <typename DT1, typename DT2, typename LY, typename ES, typename OP,
+          typename CT, typename DP, typename... VP>
+void contribute(
+    typename ES::execution_space const& exec_space, View<DT1, VP...>& dest,
+    Kokkos::Experimental::ScatterView<DT2, LY, ES, OP, CT, DP> const& src) {
+  src.contribute_into(exec_space, dest);
+}
+
+template <typename DT1, typename DT2, typename LY, typename ES, typename OP,
+          typename CT, typename DP, typename... VP>
+void contribute(
+    View<DT1, VP...>& dest,
+    Kokkos::Experimental::ScatterView<DT2, LY, ES, OP, CT, DP> const& src) {
+  using execution_space = typename ES::execution_space;
+  contribute(execution_space{}, dest, src);
+}
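+
+// Usage sketch (hypothetical `exec`, `results`, and `scatter`); both
+// overloads forward to src.contribute_into():
+//
+//   Kokkos::Experimental::contribute(exec, results, scatter);  // given instance
+//   Kokkos::Experimental::contribute(results, scatter);        // default instance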
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+template <typename DT, typename LY, typename ES, typename OP, typename CT,
+          typename DP, typename... IS, class... ViewCtorArgs>
+void realloc(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    Kokkos::Experimental::ScatterView<DT, LY, ES, OP, CT, DP>& scatter_view,
+    IS... is) {
+  scatter_view.realloc(arg_prop, is...);
+}
+
+template <typename DT, typename LY, typename ES, typename OP, typename CT,
+          typename DP, typename... IS>
+void realloc(
+    Kokkos::Experimental::ScatterView<DT, LY, ES, OP, CT, DP>& scatter_view,
+    IS... is) {
+  scatter_view.realloc(is...);
+}
+
+template <typename I, typename DT, typename LY, typename ES, typename OP,
+          typename CT, typename DP, typename... IS>
+std::enable_if_t<Kokkos::Impl::is_view_ctor_property<I>::value> realloc(
+    const I& arg_prop,
+    Kokkos::Experimental::ScatterView<DT, LY, ES, OP, CT, DP>& scatter_view,
+    IS... is) {
+  scatter_view.realloc(arg_prop, is...);
+}
+
+template <typename DT, typename LY, typename ES, typename OP, typename CT,
+          typename DP, typename... IS>
+void resize(
+    Kokkos::Experimental::ScatterView<DT, LY, ES, OP, CT, DP>& scatter_view,
+    IS... is) {
+  scatter_view.resize(is...);
+}
+
+template <class... ViewCtorArgs, typename DT, typename LY, typename ES,
+          typename OP, typename CT, typename DP, typename... IS>
+void resize(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    Kokkos::Experimental::ScatterView<DT, LY, ES, OP, CT, DP>& scatter_view,
+    IS... is) {
+  scatter_view.resize(arg_prop, is...);
+}
+
+template <typename I, typename DT, typename LY, typename ES, typename OP,
+          typename CT, typename DP, typename... IS>
+std::enable_if_t<Kokkos::Impl::is_view_ctor_property<I>::value> resize(
+    const I& arg_prop,
+    Kokkos::Experimental::ScatterView<DT, LY, ES, OP, CT, DP>& scatter_view,
+    IS... is) {
+  scatter_view.resize(arg_prop, is...);
+}
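+
+// Usage sketch (hypothetical extent `new_n0`): resize() preserves existing
+// contents while realloc() discards them; the extra duplication extent is
+// managed internally by the ScatterView.
+//
+//   Kokkos::resize(scatter_view, new_n0);
+//   Kokkos::realloc(scatter_view, new_n0);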
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_SCATTERVIEW
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_SCATTERVIEW
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/containers/src/Kokkos_StaticCrsGraph.hpp b/bundled/kokkos-3.7.00/containers/src/Kokkos_StaticCrsGraph.hpp
new file mode 100644 (file)
index 0000000..219b08b
--- /dev/null
@@ -0,0 +1,507 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STATICCRSGRAPH_HPP
+#define KOKKOS_STATICCRSGRAPH_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_STATICCRSGRAPH
+#endif
+
+#include <string>
+#include <vector>
+
+#include <Kokkos_View.hpp>
+#include <Kokkos_Parallel.hpp>
+#include <Kokkos_Parallel_Reduce.hpp>
+
+namespace Kokkos {
+
+namespace Impl {
+template <class RowOffsetsType, class RowBlockOffsetsType>
+struct StaticCrsGraphBalancerFunctor {
+  using int_type = typename RowOffsetsType::non_const_value_type;
+  RowOffsetsType row_offsets;
+  RowBlockOffsetsType row_block_offsets;
+
+  int_type cost_per_row, num_blocks;
+
+  StaticCrsGraphBalancerFunctor(RowOffsetsType row_offsets_,
+                                RowBlockOffsetsType row_block_offsets_,
+                                int_type cost_per_row_, int_type num_blocks_)
+      : row_offsets(row_offsets_),
+        row_block_offsets(row_block_offsets_),
+        cost_per_row(cost_per_row_),
+        num_blocks(num_blocks_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const int_type& iRow) const {
+    const int_type num_rows    = row_offsets.extent(0) - 1;
+    const int_type num_entries = row_offsets(num_rows);
+    const int_type total_cost  = num_entries + num_rows * cost_per_row;
+
+    const double cost_per_workset = 1.0 * total_cost / num_blocks;
+
+    const int_type row_cost =
+        row_offsets(iRow + 1) - row_offsets(iRow) + cost_per_row;
+
+    int_type count = row_offsets(iRow + 1) + cost_per_row * iRow;
+
+    if (iRow == num_rows - 1) row_block_offsets(num_blocks) = num_rows;
+
+    if (true) {
+      int_type current_block =
+          (count - row_cost - cost_per_row) / cost_per_workset;
+      int_type end_block = count / cost_per_workset;
+
+      // Handle some corner cases for the last two blocks.
+      if (current_block >= num_blocks - 2) {
+        if ((current_block == num_blocks - 2) &&
+            (count >= (current_block + 1) * cost_per_workset)) {
+          int_type row   = iRow;
+          int_type cc    = count - row_cost - cost_per_row;
+          int_type block = cc / cost_per_workset;
+          while ((block > 0) && (block == current_block)) {
+            cc    = row_offsets(row) + row * cost_per_row;
+            block = cc / cost_per_workset;
+            row--;
+          }
+          if ((count - cc - row_cost - cost_per_row) <
+              num_entries - row_offsets(iRow + 1)) {
+            row_block_offsets(current_block + 1) = iRow + 1;
+          } else {
+            row_block_offsets(current_block + 1) = iRow;
+          }
+        }
+      } else {
+        if ((count >= (current_block + 1) * cost_per_workset) ||
+            (iRow + 2 == int_type(row_offsets.extent(0)))) {
+          if (end_block > current_block + 1) {
+            int_type num_block                   = end_block - current_block;
+            row_block_offsets(current_block + 1) = iRow;
+            for (int_type block = current_block + 2; block <= end_block;
+                 block++)
+              if ((block < current_block + 2 + (num_block - 1) / 2))
+                row_block_offsets(block) = iRow;
+              else
+                row_block_offsets(block) = iRow + 1;
+          } else {
+            row_block_offsets(current_block + 1) = iRow + 1;
+          }
+        }
+      }
+    }
+  }
+};
+}  // namespace Impl
+
+/// \class GraphRowViewConst
+/// \brief View of a row of a sparse graph.
+/// \tparam GraphType Sparse graph type, such as (but not limited to)
+/// StaticCrsGraph.
+///
+/// This class provides a generic view of a row of a sparse graph.
+/// We intended this class to view a row of a StaticCrsGraph, but
+/// GraphType need not necessarily be StaticCrsGraph.
+///
+/// The row view is suited for computational kernels like sparse
+/// matrix-vector multiply.  The view is always const, as it does not
+/// allow modification of the graph.
+///
+/// Here is an example loop over the entries in the row:
+/// \code
+/// using ordinal_type = typename GraphRowViewConst<MatrixType>::ordinal_type;
+///
+/// GraphRowViewConst<GraphType> G_i = ...;
+/// const ordinal_type numEntries = G_i.length;
+/// for (ordinal_type k = 0; k < numEntries; ++k) {
+///   ordinal_type j = G_i.colidx (k);
+///   // ... do something with j ...
+/// }
+/// \endcode
+///
+/// GraphType must provide the \c data_type
+/// alias. In addition, it must make sense to use GraphRowViewConst to
+/// view a row of GraphType. In particular, column
+/// indices of a row must be accessible using the <tt>entries</tt>
+/// resp. <tt>colidx</tt> arrays given to the constructor of this
+/// class, with a constant <tt>stride</tt> between successive entries.
+/// The stride is one for the compressed sparse row storage format (as
+/// is used by CrsMatrix), but may be greater than one for other
+/// sparse matrix storage formats (e.g., ELLPACK or jagged diagonal).
+template <class GraphType>
+struct GraphRowViewConst {
+  //! The type of the column indices in the row.
+  using ordinal_type = const typename GraphType::data_type;
+
+ private:
+  //! Array of (local) column indices in the row.
+  ordinal_type* colidx_;
+  /// \brief Stride between successive entries in the row.
+  ///
+  /// For compressed sparse row (CSR) storage, this is always one.
+  /// This might be greater than one for storage formats like ELLPACK
+  /// or Jagged Diagonal.  Nevertheless, the stride can never be
+  /// greater than the number of rows or columns in the matrix.  Thus,
+  /// \c ordinal_type is the correct type.
+  const ordinal_type stride_;
+
+ public:
+  /// \brief Constructor
+  ///
+  /// \param colidx_in [in] Array of the row's column indices.
+  /// \param stride [in] (Constant) stride between successive entries
+  ///   in the array.
+  /// \param count [in] Number of entries in the row.
+  KOKKOS_INLINE_FUNCTION
+  GraphRowViewConst(ordinal_type* const colidx_in, const ordinal_type& stride,
+                    const ordinal_type& count)
+      : colidx_(colidx_in), stride_(stride), length(count) {}
+
+  /// \brief Constructor with offset into \c colidx array
+  ///
+  /// \param colidx_in [in] Array of the row's column indices.
+  /// \param stride [in] (Constant) stride between matrix entries in
+  ///   each of the above arrays.
+  /// \param count [in] Number of entries in the row.
+  /// \param idx [in] Start offset into \c colidx array
+  ///
+  /// \tparam OffsetType The type of \c idx (see above).  Must be a
+  ///   built-in integer type.  This may differ from ordinal_type.
+  ///   For example, the matrix may have dimensions that fit in int,
+  ///   but a number of entries that does not fit in int.
+  template <class OffsetType>
+  KOKKOS_INLINE_FUNCTION GraphRowViewConst(
+      const typename GraphType::entries_type& colidx_in,
+      const ordinal_type& stride, const ordinal_type& count,
+      const OffsetType& idx,
+      const std::enable_if_t<std::is_integral<OffsetType>::value, int>& = 0)
+      : colidx_(&colidx_in(idx)), stride_(stride), length(count) {}
+
+  /// \brief Number of entries in the row.
+  ///
+  /// This is a public const field rather than a public const method,
+  /// in order to avoid possible overhead of a method call if the
+  /// compiler is unable to inline that method call.
+  ///
+  /// We assume that rows contain no duplicate entries (i.e., entries
+  /// with the same column index).  Thus, a row may have up to
+  /// A.numCols() entries.  This means that the correct type of
+  /// 'length' is ordinal_type.
+  const ordinal_type length;
+
+  /// \brief (Const) reference to the column index of entry i in this
+  ///   row of the sparse matrix.
+  ///
+  /// "Entry i" is not necessarily the entry with column index i, nor
+  /// does i necessarily correspond to the (local) row index.
+  KOKKOS_INLINE_FUNCTION
+  ordinal_type& colidx(const ordinal_type& i) const {
+    return colidx_[i * stride_];
+  }
+
+  /// \brief An alias for colidx
+  KOKKOS_INLINE_FUNCTION
+  ordinal_type& operator()(const ordinal_type& i) const { return colidx(i); }
+};
+
+/// \class StaticCrsGraph
+/// \brief Compressed row storage array.
+///
+/// \tparam DataType The type of stored entries.  If a StaticCrsGraph is
+///   used as the graph of a sparse matrix, then this is usually an
+///   integer type, the type of the column indices in the sparse
+///   matrix.
+///
+/// \tparam Arg1Type The second template parameter, corresponding
+///   either to the Device type (if there are no more template
+///   parameters) or to the Layout type (if there is at least one more
+///   template parameter).
+///
+/// \tparam Arg2Type The third template parameter, which if provided
+///   corresponds to the Device type.
+///
+/// \tparam Arg3Type The fourth template parameter, which if provided
+///   corresponds to the MemoryTraits.
+///
+/// \tparam SizeType The type of row offsets.  Usually the default
+///   parameter suffices.  However, setting a nondefault value is
+///   necessary in some cases, for example, if you want to have a
+///   sparse matrix with dimensions (and therefore column indices)
+///   that fit in \c int, but want to store more than <tt>INT_MAX</tt>
+///   entries in the sparse matrix.
+///
+/// A row has a range of entries:
+/// <ul>
+/// <li> <tt> row_map[i0] <= entry < row_map[i0+1] </tt> </li>
+/// <li> <tt> 0 <= i1 < row_map[i0+1] - row_map[i0] </tt> </li>
+/// <li> <tt> entries( entry ,            i2 , i3 , ... ); </tt> </li>
+/// <li> <tt> entries( row_map[i0] + i1 , i2 , i3 , ... ); </tt> </li>
+/// </ul>
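+///
+/// For example, a row-traversal sketch (assuming a host-accessible graph
+/// \c g; names are illustrative):
+/// \code
+/// for (size_type i = 0; i < g.numRows(); ++i) {
+///   for (size_type e = g.row_map(i); e < g.row_map(i + 1); ++e) {
+///     auto column = g.entries(e);
+///     // ... use (i, column) ...
+///   }
+/// }
+/// \endcode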
+template <class DataType, class Arg1Type, class Arg2Type = void,
+          class Arg3Type    = void,
+          typename SizeType = typename ViewTraits<DataType*, Arg1Type, Arg2Type,
+                                                  Arg3Type>::size_type>
+class StaticCrsGraph {
+ private:
+  using traits = ViewTraits<DataType*, Arg1Type, Arg2Type, Arg3Type>;
+
+ public:
+  using data_type       = DataType;
+  using array_layout    = typename traits::array_layout;
+  using execution_space = typename traits::execution_space;
+  using device_type     = typename traits::device_type;
+  using memory_traits   = typename traits::memory_traits;
+  using size_type       = SizeType;
+
+  using staticcrsgraph_type =
+      StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type, SizeType>;
+  using HostMirror = StaticCrsGraph<data_type, array_layout,
+                                    typename traits::host_mirror_space,
+                                    memory_traits, size_type>;
+
+  using row_map_type =
+      View<const size_type*, array_layout, device_type, memory_traits>;
+  using entries_type =
+      View<data_type*, array_layout, device_type, memory_traits>;
+  using row_block_type =
+      View<const size_type*, array_layout, device_type, memory_traits>;
+
+  entries_type entries;
+  row_map_type row_map;
+  row_block_type row_block_offsets;
+
+  //! Construct an empty view.
+  KOKKOS_INLINE_FUNCTION
+  StaticCrsGraph() : entries(), row_map(), row_block_offsets() {}
+
+  //! Copy constructor (shallow copy).
+  KOKKOS_INLINE_FUNCTION
+  StaticCrsGraph(const StaticCrsGraph& rhs)
+      : entries(rhs.entries),
+        row_map(rhs.row_map),
+        row_block_offsets(rhs.row_block_offsets) {}
+
+  template <class EntriesType, class RowMapType>
+  KOKKOS_INLINE_FUNCTION StaticCrsGraph(const EntriesType& entries_,
+                                        const RowMapType& row_map_)
+      : entries(entries_), row_map(row_map_), row_block_offsets() {}
+
+  /** \brief  Assign to a view of the rhs array.
+   *          If the old view is the last view
+   *          then allocated memory is deallocated.
+   */
+  KOKKOS_INLINE_FUNCTION
+  StaticCrsGraph& operator=(const StaticCrsGraph& rhs) {
+    entries           = rhs.entries;
+    row_map           = rhs.row_map;
+    row_block_offsets = rhs.row_block_offsets;
+    return *this;
+  }
+
+  /**  \brief  Destroy this view of the array.
+   *           If the last view then allocated memory is deallocated.
+   */
+  KOKKOS_DEFAULTED_FUNCTION
+  ~StaticCrsGraph() = default;
+
+  /**  \brief  Return number of rows in the graph
+   */
+  KOKKOS_INLINE_FUNCTION
+  size_type numRows() const {
+    return (row_map.extent(0) != 0)
+               ? row_map.extent(0) - static_cast<size_type>(1)
+               : static_cast<size_type>(0);
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
+    return (row_map.is_allocated() && entries.is_allocated());
+  }
+
+  /// \brief Return a const view of row i of the graph.
+  ///
+  /// If row i does not belong to the graph, return an empty view.
+  ///
+  /// The returned object \c view implements the following interface:
+  /// <ul>
+  /// <li> \c view.length is the number of entries in the row </li>
+  /// <li> \c view.colidx(k) returns a const reference to the
+  ///      column index of the k-th entry in the row </li>
+  /// </ul>
+  /// k is not a column index; it just counts from 0 to
+  /// <tt>view.length - 1</tt>.
+  ///
+  /// Users should not rely on the return type of this method.  They
+  /// should instead assign to 'auto'.  That allows compile-time
+  /// polymorphism for different kinds of sparse matrix formats (e.g.,
+  /// ELLPACK or Jagged Diagonal) that we may wish to support in the
+  /// future.
+  KOKKOS_INLINE_FUNCTION
+  GraphRowViewConst<StaticCrsGraph> rowConst(const data_type i) const {
+    const size_type start = row_map(i);
+    // count is guaranteed to fit in ordinal_type, as long as no row
+    // has duplicate entries.
+    const data_type count = static_cast<data_type>(row_map(i + 1) - start);
+
+    if (count == 0) {
+      return GraphRowViewConst<StaticCrsGraph>(nullptr, 1, 0);
+    } else {
+      return GraphRowViewConst<StaticCrsGraph>(entries, 1, count, start);
+    }
+  }
+
+  /**  \brief  Create a row partitioning into a given number of blocks
+   *           balancing non-zeros + a fixed cost per row.
+   */
+  void create_block_partitioning(size_type num_blocks,
+                                 size_type fix_cost_per_row = 4) {
+    View<size_type*, array_layout, device_type> block_offsets(
+        "StatisCrsGraph::load_balance_offsets", num_blocks + 1);
+
+    Impl::StaticCrsGraphBalancerFunctor<
+        row_map_type, View<size_type*, array_layout, device_type> >
+        partitioner(row_map, block_offsets, fix_cost_per_row, num_blocks);
+
+    Kokkos::parallel_for("Kokkos::StaticCrsGraph::create_block_partitioning",
+                         Kokkos::RangePolicy<execution_space>(0, numRows()),
+                         partitioner);
+    typename device_type::execution_space().fence(
+        "Kokkos::StaticCrsGraph::create_block_partitioning:: fence after "
+        "partition");
+
+    row_block_offsets = block_offsets;
+  }
+};
+
+//----------------------------------------------------------------------------
+
+template <class StaticCrsGraphType, class InputSizeType>
+typename StaticCrsGraphType::staticcrsgraph_type create_staticcrsgraph(
+    const std::string& label, const std::vector<InputSizeType>& input);
+
+template <class StaticCrsGraphType, class InputSizeType>
+typename StaticCrsGraphType::staticcrsgraph_type create_staticcrsgraph(
+    const std::string& label,
+    const std::vector<std::vector<InputSizeType> >& input);
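+
+// Usage sketch (hypothetical graph type; the nested-vector overload takes
+// the column indices of each row as one inner vector):
+//
+//   std::vector<std::vector<int>> rows = {{0, 1}, {1, 2, 3}, {2}};
+//   auto graph = Kokkos::create_staticcrsgraph<
+//       Kokkos::StaticCrsGraph<int, Kokkos::DefaultHostExecutionSpace>>(
+//       "my_graph", rows);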
+
+//----------------------------------------------------------------------------
+
+template <class DataType, class Arg1Type, class Arg2Type, class Arg3Type,
+          typename SizeType>
+typename StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type,
+                        SizeType>::HostMirror
+create_mirror_view(const StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type,
+                                        SizeType>& input);
+
+template <class DataType, class Arg1Type, class Arg2Type, class Arg3Type,
+          typename SizeType>
+typename StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type,
+                        SizeType>::HostMirror
+create_mirror(const StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type,
+                                   SizeType>& input);
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#include <impl/Kokkos_StaticCrsGraph_factory.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class GraphType>
+struct StaticCrsGraphMaximumEntry {
+  using execution_space = typename GraphType::execution_space;
+  using value_type      = typename GraphType::data_type;
+
+  const typename GraphType::entries_type entries;
+
+  StaticCrsGraphMaximumEntry(const GraphType& graph) : entries(graph.entries) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const unsigned i, value_type& update) const {
+    if (update < entries(i)) update = entries(i);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& update) const { update = 0; }
+
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& update, const value_type& input) const {
+    if (update < input) update = input;
+  }
+};
+
+}  // namespace Impl
+
+template <class DataType, class Arg1Type, class Arg2Type, class Arg3Type,
+          typename SizeType>
+DataType maximum_entry(const StaticCrsGraph<DataType, Arg1Type, Arg2Type,
+                                            Arg3Type, SizeType>& graph) {
+  using GraphType =
+      StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type, SizeType>;
+  using FunctorType = Impl::StaticCrsGraphMaximumEntry<GraphType>;
+
+  DataType result = 0;
+  Kokkos::parallel_reduce("Kokkos::maximum_entry", graph.entries.extent(0),
+                          FunctorType(graph), result);
+  return result;
+}
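+
+// Usage sketch (with `graph` as above): reduces over graph.entries and
+// returns the largest stored entry, i.e. the maximum column index.
+//
+//   auto max_col = Kokkos::maximum_entry(graph);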
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_STATICCRSGRAPH
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_STATICCRSGRAPH
+#endif
+#endif /* #ifndef KOKKOS_STATICCRSGRAPH_HPP */
diff --git a/bundled/kokkos-3.7.00/containers/src/Kokkos_UnorderedMap.hpp b/bundled/kokkos-3.7.00/containers/src/Kokkos_UnorderedMap.hpp
new file mode 100644 (file)
index 0000000..6c11264
--- /dev/null
@@ -0,0 +1,922 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/// \file Kokkos_UnorderedMap.hpp
+/// \brief Declaration and definition of Kokkos::UnorderedMap.
+///
+/// This header file declares and defines Kokkos::UnorderedMap and its
+/// related nonmember functions.
+
+#ifndef KOKKOS_UNORDERED_MAP_HPP
+#define KOKKOS_UNORDERED_MAP_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
+#endif
+
+#include <Kokkos_Core.hpp>
+#include <Kokkos_Functional.hpp>
+
+#include <Kokkos_Bitset.hpp>
+
+#include <impl/Kokkos_Traits.hpp>
+#include <impl/Kokkos_UnorderedMap_impl.hpp>
+
+#include <iostream>
+
+#include <cstdint>
+
+namespace Kokkos {
+
+enum : unsigned { UnorderedMapInvalidIndex = ~0u };
+
+/// \brief Result type returned by UnorderedMap::insert().
+///
+/// Inserting an element into an UnorderedMap is not guaranteed to
+/// succeed.  There are three possible conditions:
+/// <ol>
+/// <li> <tt>failed()</tt>: The insert failed.  This usually
+///      means that the UnorderedMap ran out of space. </li>
+/// <li> <tt>success()</tt>: The insert succeeded, and the key
+///      did <i>not</i> exist in the table before. </li>
+/// <li> <tt>existing()</tt>: The insert succeeded, and the key
+///      <i>did</i> exist in the table before.  The new value was
+///      ignored and the old value was left in place. </li>
+/// </ol>
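+///
+/// A typical check, as a sketch (inside a parallel kernel; \c map, \c k,
+/// and \c v are illustrative):
+/// \code
+/// auto r = map.insert(k, v);
+/// if (r.failed())        { /* the map ran out of space */ }
+/// else if (r.existing()) { /* key was already present; r.index() locates it */ }
+/// else                   { /* r.success(): newly inserted at r.index() */ }
+/// \endcode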
+
+class UnorderedMapInsertResult {
+ private:
+  enum Status : uint32_t {
+    SUCCESS          = 1u << 31,
+    EXISTING         = 1u << 30,
+    FREED_EXISTING   = 1u << 29,
+    LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING)
+  };
+
+ public:
+  /// Did the map successfully insert the key/value pair
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool success() const { return (m_status & SUCCESS); }
+
+  /// Was the key already present in the map
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool existing() const { return (m_status & EXISTING); }
+
+  /// Did the map fail to insert the key due to insufficient capacity
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool failed() const { return m_index == UnorderedMapInvalidIndex; }
+
+  /// Did the map lose a race to insert a duplicate key/value pair,
+  /// claiming an index that then had to be released
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool freed_existing() const { return (m_status & FREED_EXISTING); }
+
+  /// How many iterations through the insert loop did it take before the
+  /// map returned
+  KOKKOS_FORCEINLINE_FUNCTION
+  uint32_t list_position() const { return (m_status & LIST_LENGTH_MASK); }
+
+  /// Index where the key can be found as long as the insert did not fail
+  KOKKOS_FORCEINLINE_FUNCTION
+  uint32_t index() const { return m_index; }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  UnorderedMapInsertResult() : m_index(UnorderedMapInvalidIndex), m_status(0) {}
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  void increment_list_position() {
+    m_status += (list_position() < LIST_LENGTH_MASK) ? 1u : 0u;
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  void set_existing(uint32_t i, bool arg_freed_existing) {
+    m_index = i;
+    m_status =
+        EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) | list_position();
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  void set_success(uint32_t i) {
+    m_index  = i;
+    m_status = SUCCESS | list_position();
+  }
+
+ private:
+  uint32_t m_index;
+  uint32_t m_status;
+};
+
+/// \class UnorderedMap
+/// \brief Thread-safe, performance-portable lookup table.
+///
+/// This class provides a lookup table.  In terms of functionality,
+/// this class compares to std::unordered_map (new in C++11).
+/// "Unordered" means that keys are not stored in any particular
+/// order, unlike (for example) std::map.  "Thread-safe" means that
+/// lookups, insertion, and deletion are safe to call by multiple
+/// threads in parallel.  "Performance-portable" means that parallel
+/// performance of these operations is reasonable, on multiple
+/// hardware platforms.  Platforms on which performance has been
+/// tested include conventional Intel x86 multicore processors, Intel
+/// Xeon Phi ("MIC"), and NVIDIA GPUs.
+///
+/// Parallel performance portability entails design decisions that
+/// might differ from one's expectation for a sequential interface.
+/// This particularly affects insertion of single elements.  In an
+/// interface intended for sequential use, insertion might reallocate
+/// memory if the original allocation did not suffice to hold the new
+/// element.  In this class, insertion does <i>not</i> reallocate
+/// memory.  This means that it might fail.  insert() returns an
+/// UnorderedMapInsertResult which indicates whether the insert
+/// failed.  There are three possible conditions:
+/// <ol>
+/// <li> <tt>failed()</tt>: The insert failed.  This usually
+///      means that the UnorderedMap ran out of space. </li>
+/// <li> <tt>success()</tt>: The insert succeeded, and the key
+///      did <i>not</i> exist in the table before. </li>
+/// <li> <tt>existing()</tt>: The insert succeeded, and the key
+///      <i>did</i> exist in the table before.  The new value was
+///      ignored and the old value was left in place. </li>
+/// </ol>
+///
+/// \tparam Key Type of keys of the lookup table.  If \c const, users
+///   are not allowed to add or remove keys, though they are allowed
+///   to change values.  In that case, the implementation may make
+///   optimizations specific to the <tt>Device</tt>.  For example, if
+///   <tt>Device</tt> is \c Cuda, it may use texture fetches to access
+///   keys.
+///
+/// \tparam Value Type of values stored in the lookup table.  You may use
+///   \c void here, in which case the table will be a set of keys.  If
+///   \c const, users are not allowed to change entries.
+///   In that case, the implementation may make
+///   optimizations specific to the \c Device, such as using texture
+///   fetches to access values.
+///
+/// \tparam Device The Kokkos Device type.
+///
+/// \tparam Hasher Definition of the hash function for instances of
+///   <tt>Key</tt>.  The default will calculate a bitwise hash.
+///
+/// \tparam EqualTo Definition of the equality function for instances of
+///   <tt>Key</tt>.  The default will do a bitwise equality comparison.
+///
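+/// A minimal sketch (illustrative names; the capacity is a hint, and
+/// inserts may fail once it is exhausted):
+/// \code
+/// Kokkos::UnorderedMap<int, double> map(1000);
+/// Kokkos::parallel_for(N, KOKKOS_LAMBDA(int i) {
+///   map.insert(i, 1.0 * i);
+/// });
+/// if (map.failed_insert()) { map.rehash(2 * map.capacity()); }
+/// \endcode
+///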
+template <typename Key, typename Value,
+          typename Device  = Kokkos::DefaultExecutionSpace,
+          typename Hasher  = pod_hash<std::remove_const_t<Key>>,
+          typename EqualTo = pod_equal_to<std::remove_const_t<Key>>>
+class UnorderedMap {
+ private:
+  using host_mirror_space =
+      typename ViewTraits<Key, Device, void, void>::host_mirror_space;
+
+ public:
+  //! \name Public types and constants
+  //@{
+
+  // key_types
+  using declared_key_type = Key;
+  using key_type          = std::remove_const_t<declared_key_type>;
+  using const_key_type    = std::add_const_t<key_type>;
+
+  // value_types
+  using declared_value_type = Value;
+  using value_type          = std::remove_const_t<declared_value_type>;
+  using const_value_type    = std::add_const_t<value_type>;
+
+  using device_type     = Device;
+  using execution_space = typename Device::execution_space;
+  using hasher_type     = Hasher;
+  using equal_to_type   = EqualTo;
+  using size_type       = uint32_t;
+
+  // map_types
+  using declared_map_type =
+      UnorderedMap<declared_key_type, declared_value_type, device_type,
+                   hasher_type, equal_to_type>;
+  using insertable_map_type = UnorderedMap<key_type, value_type, device_type,
+                                           hasher_type, equal_to_type>;
+  using modifiable_map_type =
+      UnorderedMap<const_key_type, value_type, device_type, hasher_type,
+                   equal_to_type>;
+  using const_map_type = UnorderedMap<const_key_type, const_value_type,
+                                      device_type, hasher_type, equal_to_type>;
+
+  static const bool is_set = std::is_void<value_type>::value;
+  static const bool has_const_key =
+      std::is_same<const_key_type, declared_key_type>::value;
+  static const bool has_const_value =
+      is_set || std::is_same<const_value_type, declared_value_type>::value;
+
+  static const bool is_insertable_map =
+      !has_const_key && (is_set || !has_const_value);
+  static const bool is_modifiable_map = has_const_key && !has_const_value;
+  static const bool is_const_map      = has_const_key && has_const_value;
+
+  using insert_result = UnorderedMapInsertResult;
+
+  using HostMirror =
+      UnorderedMap<Key, Value, host_mirror_space, Hasher, EqualTo>;
+
+  using histogram_type = Impl::UnorderedMapHistogram<const_map_type>;
+
+  //@}
+
+ private:
+  enum : size_type { invalid_index = ~static_cast<size_type>(0) };
+
+  using impl_value_type = std::conditional_t<is_set, int, declared_value_type>;
+
+  using key_type_view = std::conditional_t<
+      is_insertable_map, View<key_type *, device_type>,
+      View<const key_type *, device_type, MemoryTraits<RandomAccess>>>;
+
+  using value_type_view = std::conditional_t<
+      is_insertable_map || is_modifiable_map,
+      View<impl_value_type *, device_type>,
+      View<const impl_value_type *, device_type, MemoryTraits<RandomAccess>>>;
+
+  using size_type_view = std::conditional_t<
+      is_insertable_map, View<size_type *, device_type>,
+      View<const size_type *, device_type, MemoryTraits<RandomAccess>>>;
+
+  using bitset_type = std::conditional_t<is_insertable_map, Bitset<Device>,
+                                         ConstBitset<Device>>;
+
+  enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 };
+  enum { num_scalars = 3 };
+  using scalars_view = View<int[num_scalars], LayoutLeft, device_type>;
+
+ public:
+  //! \name Public member functions
+  //@{
+
+  /// \brief Constructor
+  ///
+  /// \param capacity_hint [in] Initial guess of how many unique keys
+  ///   will be inserted into the map.
+  /// \param hasher [in] Hasher function for \c Key instances.  The
+  ///   default value usually suffices.
+  /// \param equal_to [in] Equality function for \c Key instances.  The
+  ///   default value usually suffices.
+  UnorderedMap(size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
+               equal_to_type equal_to = equal_to_type())
+      : m_bounded_insert(true),
+        m_hasher(hasher),
+        m_equal_to(equal_to),
+        m_size(),
+        m_available_indexes(calculate_capacity(capacity_hint)),
+        m_hash_lists(view_alloc(WithoutInitializing, "UnorderedMap hash list"),
+                     Impl::find_hash_size(capacity())),
+        m_next_index(view_alloc(WithoutInitializing, "UnorderedMap next index"),
+                     capacity() + 1),  // +1 so that the *_at functions can
+                                       // always return a valid reference
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+        m_keys("UnorderedMap keys", capacity() + 1),
+        m_values("UnorderedMap values", (is_set ? 1 : capacity() + 1)),
+#else
+        m_keys("UnorderedMap keys", capacity()),
+        m_values("UnorderedMap values", (is_set ? 0 : capacity())),
+#endif
+        m_scalars("UnorderedMap scalars") {
+    if (!is_insertable_map) {
+      Kokkos::Impl::throw_runtime_exception(
+          "Cannot construct a non-insertable (i.e. const key_type) "
+          "unordered_map");
+    }
+
+    Kokkos::deep_copy(m_hash_lists, invalid_index);
+    Kokkos::deep_copy(m_next_index, invalid_index);
+  }
+
+  void reset_failed_insert_flag() { reset_flag(failed_insert_idx); }
+
+  histogram_type get_histogram() { return histogram_type(*this); }
+
+  //! Clear all entries in the table.
+  void clear() {
+    m_bounded_insert = true;
+
+    if (capacity() == 0) return;
+
+    m_available_indexes.clear();
+
+    Kokkos::deep_copy(m_hash_lists, invalid_index);
+    Kokkos::deep_copy(m_next_index, invalid_index);
+    {
+      const key_type tmp = key_type();
+      Kokkos::deep_copy(m_keys, tmp);
+    }
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+    if (is_set) {
+      const impl_value_type tmp = impl_value_type();
+      Kokkos::deep_copy(m_values, tmp);
+    }
+#endif
+    Kokkos::deep_copy(m_scalars, 0);
+    m_size = 0;
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+    return (m_keys.is_allocated() && m_values.is_allocated() &&
+            m_scalars.is_allocated());
+#else
+    return (m_keys.is_allocated() && (is_set || m_values.is_allocated()) &&
+            m_scalars.is_allocated());
+#endif
+  }
+
+  /// \brief Change the capacity of the map
+  ///
+  /// If there are no failed inserts the current size of the map will
+  /// be used as a lower bound for the input capacity.
+  /// If the map is not empty and does not have failed inserts
+  /// and the capacity changes then the current data is copied
+  /// into the resized / rehashed map.
+  ///
+  /// This is <i>not</i> a device function; it may <i>not</i> be
+  /// called in a parallel kernel.
+  bool rehash(size_type requested_capacity = 0) {
+    const bool bounded_insert = (capacity() == 0) || (size() == 0u);
+    return rehash(requested_capacity, bounded_insert);
+  }
+
+  bool rehash(size_type requested_capacity, bool bounded_insert) {
+    if (!is_insertable_map) return false;
+
+    const size_type curr_size = size();
+    requested_capacity =
+        (requested_capacity < curr_size) ? curr_size : requested_capacity;
+
+    insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);
+
+    if (curr_size) {
+      tmp.m_bounded_insert = false;
+      Impl::UnorderedMapRehash<insertable_map_type> f(tmp, *this);
+      f.apply();
+    }
+    tmp.m_bounded_insert = bounded_insert;
+
+    *this = tmp;
+
+    return true;
+  }
+
+  /// \brief The number of entries in the table.
+  ///
+  /// This method has undefined behavior when erasable() is true.
+  ///
+  /// Note that this is <i>not</i> a device function; it cannot be called in
+  /// a parallel kernel.  The value is not stored as a variable; it
+  /// must be computed. m_size is a mutable cache of that value.
+  size_type size() const {
+    if (capacity() == 0u) return 0u;
+    if (modified()) {
+      m_size = m_available_indexes.count();
+      reset_flag(modified_idx);
+    }
+    return m_size;
+  }
+
+  /// \brief Whether any insert() call has failed.
+  ///
+  /// This is <i>not</i> a device function; it may <i>not</i> be
+  /// called in a parallel kernel.  The value is not stored as a
+  /// variable; it must be computed.
+  bool failed_insert() const { return get_flag(failed_insert_idx); }
+
+  bool erasable() const {
+    return is_insertable_map ? get_flag(erasable_idx) : false;
+  }
+
+  bool begin_erase() {
+    bool result = !erasable();
+    if (is_insertable_map && result) {
+      execution_space().fence(
+          "Kokkos::UnorderedMap::begin_erase: fence before setting erasable "
+          "flag");
+      set_flag(erasable_idx);
+    }
+    return result;
+  }
+
+  bool end_erase() {
+    bool result = erasable();
+    if (is_insertable_map && result) {
+      execution_space().fence(
+          "Kokkos::UnorderedMap::end_erase: fence before erasing");
+      Impl::UnorderedMapErase<declared_map_type> f(*this);
+      f.apply();
+      execution_space().fence(
+          "Kokkos::UnorderedMap::end_erase: fence after erasing");
+      reset_flag(erasable_idx);
+    }
+    return result;
+  }
+
+  /// \brief The maximum number of entries that the table can hold.
+  ///
+  /// This <i>is</i> a device function; it may be called in a parallel
+  /// kernel.
+  KOKKOS_FORCEINLINE_FUNCTION
+  size_type capacity() const { return m_available_indexes.size(); }
+
+  /// \brief The number of hash table "buckets."
+  ///
+  /// This is different than the number of entries that the table can
+  /// hold.  Each key hashes to an index in [0, hash_capacity() - 1].
+  /// That index can hold zero or more entries.  This class decides
+  /// what hash_capacity() should be, given the user's upper bound on
+  /// the number of entries the table must be able to hold.
+  ///
+  /// This <i>is</i> a device function; it may be called in a parallel
+  /// kernel.
+  KOKKOS_INLINE_FUNCTION
+  size_type hash_capacity() const { return m_hash_lists.extent(0); }
+
+  //---------------------------------------------------------------------------
+  //---------------------------------------------------------------------------
+
+  /// This <i>is</i> a device function; it may be called in a parallel
+  /// kernel.  As discussed in the class documentation, it need not
+  /// succeed.  The return value tells you if it did.
+  ///
+  /// \param k [in] The key to attempt to insert.
+  /// \param v [in] The corresponding value to attempt to insert.  If
+  ///   using this class as a set (with Value = void), then you need not
+  ///   provide this value.
+  KOKKOS_INLINE_FUNCTION
+  insert_result insert(key_type const &k,
+                       impl_value_type const &v = impl_value_type()) const {
+    insert_result result;
+
+    if (!is_insertable_map || capacity() == 0u ||
+        m_scalars((int)erasable_idx)) {
+      return result;
+    }
+
+    if (!m_scalars((int)modified_idx)) {
+      m_scalars((int)modified_idx) = true;
+    }
+
+    int volatile &failed_insert_ref = m_scalars((int)failed_insert_idx);
+
+    const size_type hash_value = m_hasher(k);
+    const size_type hash_list  = hash_value % m_hash_lists.extent(0);
+
+    size_type *curr_ptr = &m_hash_lists[hash_list];
+    size_type new_index = invalid_index;
+
+    // Map the hash list proportionally into the capacity range; use
+    // double arithmetic to avoid overflowing a 32-bit integer multiply.
+    size_type index_hint = static_cast<size_type>(
+        (static_cast<double>(hash_list) * capacity()) / m_hash_lists.extent(0));
+
+    size_type find_attempts = 0;
+
+    enum : unsigned { bounded_find_attempts = 32u };
+    const size_type max_attempts =
+        (m_bounded_insert &&
+         (bounded_find_attempts < m_available_indexes.max_hint()))
+            ? bounded_find_attempts
+            : m_available_indexes.max_hint();
+
+    bool not_done = true;
+
+#if defined(__MIC__)
+#pragma noprefetch
+#endif
+    while (not_done) {
+      // Continue searching the unordered list for this key.
+      // The list is only appended to during the insert phase, so a
+      // volatile_load is needed because other threads may be appending.
+
+      // FIXME_SYCL replacement for memory_fence
+#ifdef KOKKOS_ENABLE_SYCL
+      size_type curr = Kokkos::atomic_load(curr_ptr);
+#else
+      size_type curr = volatile_load(curr_ptr);
+#endif
+
+      KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
+          &m_keys[curr != invalid_index ? curr : 0]);
+#if defined(__MIC__)
+#pragma noprefetch
+#endif
+      while (curr != invalid_index && !m_equal_to(
+#ifdef KOKKOS_ENABLE_SYCL
+                                          Kokkos::atomic_load(&m_keys[curr])
+#else
+                                          volatile_load(&m_keys[curr])
+#endif
+                                              ,
+                                          k)) {
+        result.increment_list_position();
+        index_hint = curr;
+        curr_ptr   = &m_next_index[curr];
+#ifdef KOKKOS_ENABLE_SYCL
+        curr = Kokkos::atomic_load(curr_ptr);
+#else
+        curr = volatile_load(curr_ptr);
+#endif
+        KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
+            &m_keys[curr != invalid_index ? curr : 0]);
+      }
+
+      //------------------------------------------------------------
+      // If key already present then return that index.
+      if (curr != invalid_index) {
+        const bool free_existing = new_index != invalid_index;
+        if (free_existing) {
+          // Previously claimed an unused entry that was not inserted.
+          // Release this unused entry immediately.
+          if (!m_available_indexes.reset(new_index)) {
+            KOKKOS_IMPL_DO_NOT_USE_PRINTF("Unable to free existing\n");
+          }
+        }
+
+        result.set_existing(curr, free_existing);
+        not_done = false;
+      }
+      //------------------------------------------------------------
+      // Key is not currently in the map.
+      // If the thread has claimed an entry try to insert now.
+      else {
+        //------------------------------------------------------------
+        // If have not already claimed an unused entry then do so now.
+        if (new_index == invalid_index) {
+          bool found = false;
+          // use the hash_list as the flag for the search direction
+          Kokkos::tie(found, index_hint) =
+              m_available_indexes.find_any_unset_near(index_hint, hash_list);
+
+          // found an index and this thread set it
+          if (!found && ++find_attempts >= max_attempts) {
+            failed_insert_ref = true;
+            not_done          = false;
+          } else if (m_available_indexes.set(index_hint)) {
+            new_index = index_hint;
+            // Set key and value
+            KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]);
+// FIXME_SYCL replacement for memory_fence
+#ifdef KOKKOS_ENABLE_SYCL
+            Kokkos::atomic_store(&m_keys[new_index], k);
+#else
+            m_keys[new_index] = k;
+#endif
+
+            if (!is_set) {
+              KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
+#ifdef KOKKOS_ENABLE_SYCL
+              Kokkos::atomic_store(&m_values[new_index], v);
+#else
+              m_values[new_index] = v;
+#endif
+            }
+
+#ifndef KOKKOS_ENABLE_SYCL
+            // Do not proceed until key and value are updated in global memory
+            memory_fence();
+#endif
+          }
+        } else if (failed_insert_ref) {
+          not_done = false;
+        }
+
+        // Attempt to append the claimed entry to the list.
+        // Another thread may be trying to append to the same list, so
+        // protect the update with an atomic.
+        if (new_index != invalid_index &&
+            curr == atomic_compare_exchange(
+                        curr_ptr, static_cast<size_type>(invalid_index),
+                        new_index)) {
+          // Succeeded in appending
+          result.set_success(new_index);
+          not_done = false;
+        }
+      }
+    }  // while ( not_done )
+
+    return result;
+  }
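+
+  // Typical use, sketched (illustrative; all names are placeholders):
+  // insert from inside a parallel kernel, then check on the host whether
+  // any insert failed and rehash if so.
+  //
+  //   Kokkos::UnorderedMap<int, double> map(capacity_hint);
+  //   Kokkos::parallel_for(n, KOKKOS_LAMBDA(int i) {
+  //     auto r = map.insert(i, 1.0 * i);
+  //     (void)r.success();  // true if this thread inserted the key
+  //   });
+  //   if (map.failed_insert()) { /* rehash with a larger capacity, retry */ }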
+
+  KOKKOS_INLINE_FUNCTION
+  bool erase(key_type const &k) const {
+    bool result = false;
+
+    if (is_insertable_map && 0u < capacity() && m_scalars((int)erasable_idx)) {
+      if (!m_scalars((int)modified_idx)) {
+        m_scalars((int)modified_idx) = true;
+      }
+
+      size_type index = find(k);
+      if (valid_at(index)) {
+        m_available_indexes.reset(index);
+        result = true;
+      }
+    }
+
+    return result;
+  }
+
+  /// \brief Find the given key \c k, if it exists in the table.
+  ///
+  /// \return If the key exists in the table, the index of the
+  ///   value corresponding to that key; otherwise, an invalid index.
+  ///
+  /// This <i>is</i> a device function; it may be called in a parallel
+  /// kernel.
+  KOKKOS_INLINE_FUNCTION
+  size_type find(const key_type &k) const {
+    size_type curr = 0u < capacity()
+                         ? m_hash_lists(m_hasher(k) % m_hash_lists.extent(0))
+                         : invalid_index;
+
+    KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
+    while (curr != invalid_index && !m_equal_to(m_keys[curr], k)) {
+      KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
+          &m_keys[curr != invalid_index ? curr : 0]);
+      curr = m_next_index[curr];
+    }
+
+    return curr;
+  }
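+
+  // A lookup sketch (illustrative): find() returns an index that must be
+  // validated with valid_at() before use.
+  //
+  //   const auto idx = map.find(key);
+  //   if (map.valid_at(idx)) {
+  //     auto val = map.value_at(idx);
+  //   }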
+
+  /// \brief Whether the key exists in the map.
+  ///
+  /// This <i>is</i> a device function; it may be called in a parallel
+  /// kernel.
+  KOKKOS_INLINE_FUNCTION
+  bool exists(const key_type &k) const { return valid_at(find(k)); }
+
+  /// \brief Get the value with \c i as its direct index.
+  ///
+  /// \param i [in] Index directly into the array of entries.
+  ///
+  /// This <i>is</i> a device function; it may be called in a parallel
+  /// kernel.
+  ///
+  /// A 'const value_type' loaded via a CUDA texture fetch must be
+  /// returned by value.
+  template <typename Dummy = value_type>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      !std::is_void<Dummy>::value,  // !is_set
+      std::conditional_t<has_const_value, impl_value_type, impl_value_type &>>
+  value_at(size_type i) const {
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+    return m_values[i < capacity() ? i : capacity()];
+#else
+    KOKKOS_EXPECTS(i < capacity());
+    return m_values[i];
+#endif
+  }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  template <typename Dummy = value_type>
+  KOKKOS_DEPRECATED_WITH_COMMENT(
+      "Calling value_at for value_type==void is deprecated!")
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      std::is_void<Dummy>::value,  // is_set
+      std::conditional_t<has_const_value, impl_value_type,
+                         impl_value_type &>> value_at(size_type /*i*/) const {
+    return m_values[0];
+  }
+#endif
+
+  /// \brief Get the key with \c i as its direct index.
+  ///
+  /// \param i [in] Index directly into the array of entries.
+  ///
+  /// This <i>is</i> a device function; it may be called in a parallel
+  /// kernel.
+  KOKKOS_FORCEINLINE_FUNCTION
+  key_type key_at(size_type i) const {
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+    return m_keys[i < capacity() ? i : capacity()];
+#else
+    KOKKOS_EXPECTS(i < capacity());
+    return m_keys[i];
+#endif
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool valid_at(size_type i) const { return m_available_indexes.test(i); }
+
+  template <typename SKey, typename SValue>
+  UnorderedMap(
+      UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src,
+      std::enable_if_t<
+          Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type,
+                                      SKey, SValue>::value,
+          int> = 0)
+      : m_bounded_insert(src.m_bounded_insert),
+        m_hasher(src.m_hasher),
+        m_equal_to(src.m_equal_to),
+        m_size(src.m_size),
+        m_available_indexes(src.m_available_indexes),
+        m_hash_lists(src.m_hash_lists),
+        m_next_index(src.m_next_index),
+        m_keys(src.m_keys),
+        m_values(src.m_values),
+        m_scalars(src.m_scalars) {}
+
+  template <typename SKey, typename SValue>
+  std::enable_if_t<
+      Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type, SKey,
+                                  SValue>::value,
+      declared_map_type &>
+  operator=(UnorderedMap<SKey, SValue, Device, Hasher, EqualTo> const &src) {
+    m_bounded_insert    = src.m_bounded_insert;
+    m_hasher            = src.m_hasher;
+    m_equal_to          = src.m_equal_to;
+    m_size              = src.m_size;
+    m_available_indexes = src.m_available_indexes;
+    m_hash_lists        = src.m_hash_lists;
+    m_next_index        = src.m_next_index;
+    m_keys              = src.m_keys;
+    m_values            = src.m_values;
+    m_scalars           = src.m_scalars;
+    return *this;
+  }
+
+  template <typename SKey, typename SValue, typename SDevice>
+  std::enable_if_t<std::is_same<std::remove_const_t<SKey>, key_type>::value &&
+                   std::is_same<std::remove_const_t<SValue>, value_type>::value>
+  create_copy_view(
+      UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo> const &src) {
+    if (m_hash_lists.data() != src.m_hash_lists.data()) {
+      insertable_map_type tmp;
+
+      tmp.m_bounded_insert    = src.m_bounded_insert;
+      tmp.m_hasher            = src.m_hasher;
+      tmp.m_equal_to          = src.m_equal_to;
+      tmp.m_size              = src.size();
+      tmp.m_available_indexes = bitset_type(src.capacity());
+      tmp.m_hash_lists        = size_type_view(
+          view_alloc(WithoutInitializing, "UnorderedMap hash list"),
+          src.m_hash_lists.extent(0));
+      tmp.m_next_index = size_type_view(
+          view_alloc(WithoutInitializing, "UnorderedMap next index"),
+          src.m_next_index.extent(0));
+      tmp.m_keys =
+          key_type_view(view_alloc(WithoutInitializing, "UnorderedMap keys"),
+                        src.m_keys.extent(0));
+      tmp.m_values = value_type_view(
+          view_alloc(WithoutInitializing, "UnorderedMap values"),
+          src.m_values.extent(0));
+      tmp.m_scalars = scalars_view("UnorderedMap scalars");
+
+      Kokkos::deep_copy(tmp.m_available_indexes, src.m_available_indexes);
+
+      using raw_deep_copy =
+          Kokkos::Impl::DeepCopy<typename device_type::memory_space,
+                                 typename SDevice::memory_space>;
+
+      raw_deep_copy(tmp.m_hash_lists.data(), src.m_hash_lists.data(),
+                    sizeof(size_type) * src.m_hash_lists.extent(0));
+      raw_deep_copy(tmp.m_next_index.data(), src.m_next_index.data(),
+                    sizeof(size_type) * src.m_next_index.extent(0));
+      raw_deep_copy(tmp.m_keys.data(), src.m_keys.data(),
+                    sizeof(key_type) * src.m_keys.extent(0));
+      if (!is_set) {
+        raw_deep_copy(tmp.m_values.data(), src.m_values.data(),
+                      sizeof(impl_value_type) * src.m_values.extent(0));
+      }
+      raw_deep_copy(tmp.m_scalars.data(), src.m_scalars.data(),
+                    sizeof(int) * num_scalars);
+
+      Kokkos::fence(
+          "Kokkos::UnorderedMap::create_copy_view: fence after copy to tmp");
+
+      *this = tmp;
+    }
+  }
+
+  //@}
+ private:  // private member functions
+  bool modified() const { return get_flag(modified_idx); }
+
+  void set_flag(int flag) const {
+    using raw_deep_copy =
+        Kokkos::Impl::DeepCopy<typename device_type::memory_space,
+                               Kokkos::HostSpace>;
+    const int true_ = true;
+    raw_deep_copy(m_scalars.data() + flag, &true_, sizeof(int));
+    Kokkos::fence(
+        "Kokkos::UnorderedMap::set_flag: fence after copying flag from "
+        "HostSpace");
+  }
+
+  void reset_flag(int flag) const {
+    using raw_deep_copy =
+        Kokkos::Impl::DeepCopy<typename device_type::memory_space,
+                               Kokkos::HostSpace>;
+    const int false_ = false;
+    raw_deep_copy(m_scalars.data() + flag, &false_, sizeof(int));
+    Kokkos::fence(
+        "Kokkos::UnorderedMap::reset_flag: fence after copying flag from "
+        "HostSpace");
+  }
+
+  bool get_flag(int flag) const {
+    using raw_deep_copy =
+        Kokkos::Impl::DeepCopy<Kokkos::HostSpace,
+                               typename device_type::memory_space>;
+    int result = false;
+    raw_deep_copy(&result, m_scalars.data() + flag, sizeof(int));
+    Kokkos::fence(
+        "Kokkos::UnorderedMap::get_flag: fence after copy to return value in "
+        "HostSpace");
+    return result;
+  }
+
+  static uint32_t calculate_capacity(uint32_t capacity_hint) {
+    // increase by roughly 16% (a factor of 7/6) and round up to the
+    // nearest multiple of 128
+    return capacity_hint
+               ? ((static_cast<uint32_t>(7ull * capacity_hint / 6u) + 127u) /
+                  128u) *
+                     128u
+               : 128u;
+  }
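+
+  // Worked example of the rounding above (illustrative): a hint of 1000
+  // becomes 7 * 1000 / 6 = 1166, and (1166 + 127) / 128 * 128 = 1280.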
+
+ private:  // private members
+  bool m_bounded_insert;
+  hasher_type m_hasher;
+  equal_to_type m_equal_to;
+  mutable size_type m_size;
+  bitset_type m_available_indexes;
+  size_type_view m_hash_lists;
+  size_type_view m_next_index;
+  key_type_view m_keys;
+  value_type_view m_values;
+  scalars_view m_scalars;
+
+  template <typename KKey, typename VValue, typename DDevice, typename HHash,
+            typename EEqualTo>
+  friend class UnorderedMap;
+
+  template <typename UMap>
+  friend struct Impl::UnorderedMapErase;
+
+  template <typename UMap>
+  friend struct Impl::UnorderedMapHistogram;
+
+  template <typename UMap>
+  friend struct Impl::UnorderedMapPrint;
+};
+
+// Specialization of deep_copy for two UnorderedMap objects.
+template <typename DKey, typename DT, typename DDevice, typename SKey,
+          typename ST, typename SDevice, typename Hasher, typename EqualTo>
+inline void deep_copy(
+    UnorderedMap<DKey, DT, DDevice, Hasher, EqualTo> &dst,
+    const UnorderedMap<SKey, ST, SDevice, Hasher, EqualTo> &src) {
+  dst.create_copy_view(src);
+}
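+
+// For example (illustrative; assumes a CUDA-enabled build):
+//
+//   Kokkos::UnorderedMap<int, double, Kokkos::CudaSpace> dst;
+//   Kokkos::UnorderedMap<int, double, Kokkos::HostSpace> src(1000);
+//   // ... fill src on the host ...
+//   Kokkos::deep_copy(dst, src);  // forwards to dst.create_copy_view(src)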
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_UNORDEREDMAP
+#endif
+#endif  // KOKKOS_UNORDERED_MAP_HPP
diff --git a/bundled/kokkos-3.7.00/containers/src/Kokkos_Vector.hpp b/bundled/kokkos-3.7.00/containers/src/Kokkos_Vector.hpp
new file mode 100644 (file)
index 0000000..8dd0807
--- /dev/null
@@ -0,0 +1,343 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_VECTOR_HPP
+#define KOKKOS_VECTOR_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_VECTOR
+#endif
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_DualView.hpp>
+
+/* Drop-in replacement for std::vector based on Kokkos::DualView.
+ * Most member functions work only on the host (calling them from a
+ * device kernel will not compile).
+ */
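+
+// A brief usage sketch (illustrative):
+//
+//   Kokkos::vector<double> v(100, 1.0);  // host-accessible, device-backed
+//   v.push_back(2.0);                    // host-only operation
+//   v.host_to_device();                  // sync explicitly before kernels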
+namespace Kokkos {
+
+template <class Scalar, class Arg1Type = void>
+class vector : public DualView<Scalar*, LayoutLeft, Arg1Type> {
+ public:
+  using value_type      = Scalar;
+  using pointer         = Scalar*;
+  using const_pointer   = const Scalar*;
+  using reference       = Scalar&;
+  using const_reference = const Scalar&;
+  using iterator        = Scalar*;
+  using const_iterator  = const Scalar*;
+  using size_type       = size_t;
+
+ private:
+  size_t _size;
+  float _extra_storage;
+  using DV = DualView<Scalar*, LayoutLeft, Arg1Type>;
+
+ public:
+#ifdef KOKKOS_ENABLE_CUDA_UVM
+  KOKKOS_INLINE_FUNCTION reference operator()(int i) const {
+    return DV::h_view(i);
+  };
+  KOKKOS_INLINE_FUNCTION reference operator[](int i) const {
+    return DV::h_view(i);
+  };
+#else
+  inline reference operator()(int i) const { return DV::h_view(i); };
+  inline reference operator[](int i) const { return DV::h_view(i); };
+#endif
+
+  /* Member functions which behave like std::vector functions */
+
+  vector() : DV() {
+    _size          = 0;
+    _extra_storage = 1.1;
+  }
+
+  vector(int n, Scalar val = Scalar())
+      : DualView<Scalar*, LayoutLeft, Arg1Type>("Vector", size_t(n * (1.1))) {
+    _size                 = n;
+    _extra_storage        = 1.1;
+    DV::modified_flags(0) = 1;
+
+    assign(n, val);
+  }
+
+  void resize(size_t n) {
+    if (n >= span()) DV::resize(size_t(n * _extra_storage));
+    _size = n;
+  }
+
+  void resize(size_t n, const Scalar& val) { assign(n, val); }
+
+  void assign(size_t n, const Scalar& val) {
+    /* Resize if necessary (matching the behavior of std::vector) */
+
+    if (n > span()) DV::resize(size_t(n * _extra_storage));
+    _size = n;
+
+    /* Assign value either on host or on device */
+
+    if (DV::template need_sync<typename DV::t_dev::device_type>()) {
+      set_functor_host f(DV::h_view, val);
+      parallel_for("Kokkos::vector::assign", n, f);
+      typename DV::t_host::execution_space().fence(
+          "Kokkos::vector::assign: fence after assigning values");
+      DV::template modify<typename DV::t_host::device_type>();
+    } else {
+      set_functor f(DV::d_view, val);
+      parallel_for("Kokkos::vector::assign", n, f);
+      typename DV::t_dev::execution_space().fence(
+          "Kokkos::vector::assign: fence after assigning values");
+      DV::template modify<typename DV::t_dev::device_type>();
+    }
+  }
+
+  void reserve(size_t n) { DV::resize(size_t(n * _extra_storage)); }
+
+  void push_back(Scalar val) {
+    DV::template sync<typename DV::t_host::device_type>();
+    DV::template modify<typename DV::t_host::device_type>();
+    if (_size == span()) {
+      size_t new_size = _size * _extra_storage;
+      if (new_size == _size) new_size++;
+      DV::resize(new_size);
+    }
+
+    DV::h_view(_size) = val;
+    _size++;
+  }
+
+  void pop_back() { _size--; }
+
+  void clear() { _size = 0; }
+
+  iterator insert(iterator it, const value_type& val) {
+    return insert(it, 1, val);
+  }
+
+  iterator insert(iterator it, size_type count, const value_type& val) {
+    if ((size() == 0) && (it == begin())) {
+      resize(count, val);
+      DV::sync_host();
+      return begin();
+    }
+    DV::sync_host();
+    DV::modify_host();
+    if (std::less<>()(it, begin()) || std::less<>()(end(), it))
+      Kokkos::abort("Kokkos::vector::insert : invalid insert iterator");
+    if (count == 0) return it;
+    ptrdiff_t start = std::distance(begin(), it);
+    auto org_size   = size();
+    resize(size() + count);
+
+    std::copy_backward(begin() + start, begin() + org_size,
+                       begin() + org_size + count);
+    std::fill_n(begin() + start, count, val);
+
+    return begin() + start;
+  }
+
+ private:
+  template <class T>
+  struct impl_is_input_iterator
+      : /* TODO replace this */ std::integral_constant<
+            bool, !std::is_convertible<T, size_type>::value> {};
+
+ public:
+  // TODO: can use detection idiom to generate better error message here later
+  template <typename InputIterator>
+  std::enable_if_t<impl_is_input_iterator<InputIterator>::value, iterator>
+  insert(iterator it, InputIterator b, InputIterator e) {
+    ptrdiff_t count = std::distance(b, e);
+
+    DV::sync_host();
+    DV::modify_host();
+    if (std::less<>()(it, begin()) || std::less<>()(end(), it))
+      Kokkos::abort("Kokkos::vector::insert : invalid insert iterator");
+
+    ptrdiff_t start = std::distance(begin(), it);
+    auto org_size   = size();
+
+    // Note: resize(...) invalidates it; use begin() + start instead
+    resize(size() + count);
+
+    std::copy_backward(begin() + start, begin() + org_size,
+                       begin() + org_size + count);
+    std::copy(b, e, begin() + start);
+
+    return begin() + start;
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
+    return DV::is_allocated();
+  }
+
+  size_type size() const { return _size; }
+  size_type max_size() const { return 2000000000; }
+  size_type span() const { return DV::span(); }
+  bool empty() const { return _size == 0; }
+
+  pointer data() const { return DV::h_view.data(); }
+
+  iterator begin() const { return DV::h_view.data(); }
+
+  iterator end() const {
+    return _size > 0 ? DV::h_view.data() + _size : DV::h_view.data();
+  }
+
+  reference front() { return DV::h_view(0); }
+
+  reference back() { return DV::h_view(_size - 1); }
+
+  const_reference front() const { return DV::h_view(0); }
+
+  const_reference back() const { return DV::h_view(_size - 1); }
+
+  /* std::algorithms which work originally with iterators, here they are
+   * implemented as member functions */
+
+  size_t lower_bound(const size_t& start, const size_t& theEnd,
+                     const Scalar& comp_val) const {
+    int lower = start;  // FIXME (mfh 24 Apr 2014) narrowing conversion
+    int upper =
+        _size > theEnd
+            ? theEnd
+            : _size - 1;  // FIXME (mfh 24 Apr 2014) narrowing conversion
+    if (upper <= lower) {
+      return theEnd;
+    }
+
+    Scalar lower_val = DV::h_view(lower);
+    Scalar upper_val = DV::h_view(upper);
+    size_t idx       = (upper + lower) / 2;
+    Scalar val       = DV::h_view(idx);
+    if (val > upper_val) return upper;
+    if (val < lower_val) return start;
+
+    while (upper > lower) {
+      if (comp_val > val) {
+        lower = ++idx;
+      } else {
+        upper = idx;
+      }
+      idx = (upper + lower) / 2;
+      val = DV::h_view(idx);
+    }
+    return idx;
+  }
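+
+  // Illustrative: on a sorted vector {1, 3, 5, 7}, lower_bound(0, 4, 5)
+  // returns index 2.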
+
+  bool is_sorted() {
+    // Use i + 1 < _size rather than i < _size - 1: _size is unsigned, so
+    // _size - 1 would wrap around for an empty vector.
+    for (size_t i = 0; i + 1 < _size; i++) {
+      if (DV::h_view(i) > DV::h_view(i + 1)) return false;
+    }
+    return true;
+  }
+
+  iterator find(Scalar val) const {
+    if (_size == 0) return end();
+
+    int upper, lower, current;
+    current = _size / 2;
+    upper   = _size - 1;
+    lower   = 0;
+
+    if ((val < DV::h_view(0)) || (val > DV::h_view(_size - 1))) return end();
+
+    while (upper > lower) {
+      if (val > DV::h_view(current))
+        lower = current + 1;
+      else
+        upper = current;
+      current = (upper + lower) / 2;
+    }
+
+    if (val == DV::h_view(current))
+      return &DV::h_view(current);
+    else
+      return end();
+  }
+
+  /* Additional functions for data management */
+
+  void device_to_host() { deep_copy(DV::h_view, DV::d_view); }
+  void host_to_device() const { deep_copy(DV::d_view, DV::h_view); }
+
+  void on_host() { DV::template modify<typename DV::t_host::device_type>(); }
+  void on_device() { DV::template modify<typename DV::t_dev::device_type>(); }
+
+  void set_overallocation(float extra) { _extra_storage = 1.0 + extra; }
+
+ public:
+  struct set_functor {
+    using execution_space = typename DV::t_dev::execution_space;
+    typename DV::t_dev _data;
+    Scalar _val;
+
+    set_functor(typename DV::t_dev data, Scalar val) : _data(data), _val(val) {}
+
+    KOKKOS_INLINE_FUNCTION
+    void operator()(const int& i) const { _data(i) = _val; }
+  };
+
+  struct set_functor_host {
+    using execution_space = typename DV::t_host::execution_space;
+    typename DV::t_host _data;
+    Scalar _val;
+
+    set_functor_host(typename DV::t_host data, Scalar val)
+        : _data(data), _val(val) {}
+
+    KOKKOS_INLINE_FUNCTION
+    void operator()(const int& i) const { _data(i) = _val; }
+  };
+};
+
+}  // namespace Kokkos
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_VECTOR
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_VECTOR
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/containers/src/impl/Kokkos_Bitset_impl.hpp b/bundled/kokkos-3.7.00/containers/src/impl/Kokkos_Bitset_impl.hpp
new file mode 100644 (file)
index 0000000..134b307
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_BITSET_IMPL_HPP
+#define KOKKOS_BITSET_IMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_BitOps.hpp>
+#include <cstdint>
+
+#include <cstdio>
+#include <climits>
+#include <iostream>
+#include <iomanip>
+
+namespace Kokkos {
+namespace Impl {
+
+KOKKOS_FORCEINLINE_FUNCTION
+unsigned rotate_right(unsigned i, int r) {
+  constexpr int size = static_cast<int>(sizeof(unsigned) * CHAR_BIT);
+  return r ? ((i >> r) | (i << (size - r))) : i;
+}
+
+template <typename Bitset>
+struct BitsetCount {
+  using bitset_type = Bitset;
+  using execution_space =
+      typename bitset_type::execution_space::execution_space;
+  using size_type  = typename bitset_type::size_type;
+  using value_type = size_type;
+
+  bitset_type m_bitset;
+
+  BitsetCount(bitset_type const& bitset) : m_bitset(bitset) {}
+
+  size_type apply() const {
+    size_type count = 0u;
+    parallel_reduce("Kokkos::Impl::BitsetCount::apply",
+                    m_bitset.m_blocks.extent(0), *this, count);
+    return count;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& count) const { count = 0u; }
+
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& count, const size_type& incr) const { count += incr; }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(size_type i, value_type& count) const {
+    count += bit_count(m_bitset.m_blocks[i]);
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_BITSET_IMPL_HPP
diff --git a/bundled/kokkos-3.7.00/containers/src/impl/Kokkos_Functional_impl.hpp b/bundled/kokkos-3.7.00/containers/src/impl/Kokkos_Functional_impl.hpp
new file mode 100644 (file)
index 0000000..5fe3ab0
--- /dev/null
@@ -0,0 +1,189 @@
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+
+#ifndef KOKKOS_FUNCTIONAL_IMPL_HPP
+#define KOKKOS_FUNCTIONAL_IMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <cstdint>
+
+namespace Kokkos {
+namespace Impl {
+
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+KOKKOS_FORCEINLINE_FUNCTION
+uint32_t getblock32(const uint8_t* p, int i) {
+  // assemble the block bytewise to avoid aliasing violations, which
+  // could cause errors in combination with forced inlining
+  return ((uint32_t)p[i * 4 + 0]) | ((uint32_t)p[i * 4 + 1] << 8) |
+         ((uint32_t)p[i * 4 + 2] << 16) | ((uint32_t)p[i * 4 + 3] << 24);
+}
+
+KOKKOS_FORCEINLINE_FUNCTION
+uint32_t rotl32(uint32_t x, int8_t r) { return (x << r) | (x >> (32 - r)); }
+
+KOKKOS_FORCEINLINE_FUNCTION
+uint32_t fmix32(uint32_t h) {
+  h ^= h >> 16;
+  h *= 0x85ebca6b;
+  h ^= h >> 13;
+  h *= 0xc2b2ae35;
+  h ^= h >> 16;
+
+  return h;
+}
+
+KOKKOS_INLINE_FUNCTION
+uint32_t MurmurHash3_x86_32(const void* key, int len, uint32_t seed) {
+  const uint8_t* data = static_cast<const uint8_t*>(key);
+  const int nblocks   = len / 4;
+
+  uint32_t h1 = seed;
+
+  const uint32_t c1 = 0xcc9e2d51;
+  const uint32_t c2 = 0x1b873593;
+
+  //----------
+  // body
+
+  for (int i = 0; i < nblocks; ++i) {
+    uint32_t k1 = getblock32(data, i);
+
+    k1 *= c1;
+    k1 = rotl32(k1, 15);
+    k1 *= c2;
+
+    h1 ^= k1;
+    h1 = rotl32(h1, 13);
+    h1 = h1 * 5 + 0xe6546b64;
+  }
+
+  //----------
+  // tail
+
+  const uint8_t* tail = (const uint8_t*)(data + nblocks * 4);
+
+  uint32_t k1 = 0;
+
+  switch (len & 3) {
+    case 3: k1 ^= tail[2] << 16; KOKKOS_IMPL_FALLTHROUGH
+    case 2: k1 ^= tail[1] << 8; KOKKOS_IMPL_FALLTHROUGH
+    case 1:
+      k1 ^= tail[0];
+      k1 *= c1;
+      k1 = rotl32(k1, 15);
+      k1 *= c2;
+      h1 ^= k1;
+  };
+
+  //----------
+  // finalization
+
+  h1 ^= len;
+
+  h1 = fmix32(h1);
+
+  return h1;
+}
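+
+// A hashing sketch (illustrative): hash the raw bytes of a POD key with a
+// fixed seed, as a hash functor for trivially copyable keys might.
+//
+//   int key = 42;
+//   uint32_t h = MurmurHash3_x86_32(&key, sizeof(key), 0u);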
+
+#if defined(__GNUC__) /* GNU C   */ || defined(__GNUG__) /* GNU C++ */ || \
+    defined(__clang__)
+
+#define KOKKOS_IMPL_MAY_ALIAS __attribute__((__may_alias__))
+
+#else
+
+#define KOKKOS_IMPL_MAY_ALIAS
+
+#endif
+
+template <typename T>
+KOKKOS_FORCEINLINE_FUNCTION bool bitwise_equal(T const* const a_ptr,
+                                               T const* const b_ptr) {
+  typedef uint64_t KOKKOS_IMPL_MAY_ALIAS T64;  // NOLINT(modernize-use-using)
+  typedef uint32_t KOKKOS_IMPL_MAY_ALIAS T32;  // NOLINT(modernize-use-using)
+  typedef uint16_t KOKKOS_IMPL_MAY_ALIAS T16;  // NOLINT(modernize-use-using)
+  typedef uint8_t KOKKOS_IMPL_MAY_ALIAS T8;    // NOLINT(modernize-use-using)
+
+  enum {
+    NUM_8  = sizeof(T),
+    NUM_16 = NUM_8 / 2,
+    NUM_32 = NUM_8 / 4,
+    NUM_64 = NUM_8 / 8
+  };
+
+  union {
+    T const* const ptr;
+    T64 const* const ptr64;
+    T32 const* const ptr32;
+    T16 const* const ptr16;
+    T8 const* const ptr8;
+  } a = {a_ptr}, b = {b_ptr};
+
+  bool result = true;
+
+  for (int i = 0; i < NUM_64; ++i) {
+    result = result && a.ptr64[i] == b.ptr64[i];
+  }
+
+  if (NUM_64 * 2 < NUM_32) {
+    result = result && a.ptr32[NUM_64 * 2] == b.ptr32[NUM_64 * 2];
+  }
+
+  if (NUM_32 * 2 < NUM_16) {
+    result = result && a.ptr16[NUM_32 * 2] == b.ptr16[NUM_32 * 2];
+  }
+
+  if (NUM_16 * 2 < NUM_8) {
+    result = result && a.ptr8[NUM_16 * 2] == b.ptr8[NUM_16 * 2];
+  }
+
+  return result;
+}
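+
+// Illustrative use: two trivially copyable objects compare equal exactly
+// when all of their bytes match, e.g.
+//
+//   struct P { int x; int y; };
+//   P a{1, 2}, b{1, 2};
+//   bool same = bitwise_equal(&a, &b);  // true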
+
+#undef KOKKOS_IMPL_MAY_ALIAS
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_FUNCTIONAL_IMPL_HPP
diff --git a/bundled/kokkos-3.7.00/containers/src/impl/Kokkos_StaticCrsGraph_factory.hpp b/bundled/kokkos-3.7.00/containers/src/impl/Kokkos_StaticCrsGraph_factory.hpp
new file mode 100644 (file)
index 0000000..b81b1ee
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_STATICCRSGRAPH_FACTORY_HPP
+#define KOKKOS_IMPL_STATICCRSGRAPH_FACTORY_HPP
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+#include <Kokkos_Core.hpp>
+#include <Kokkos_StaticCrsGraph.hpp>
+
+namespace Kokkos {
+
+template <class DataType, class Arg1Type, class Arg2Type, class Arg3Type,
+          typename SizeType>
+inline typename StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type,
+                               SizeType>::HostMirror
+create_mirror_view(const StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type,
+                                        SizeType>& view,
+                   std::enable_if_t<ViewTraits<DataType, Arg1Type, Arg2Type,
+                                               Arg3Type>::is_hostspace>* = 0) {
+  return view;
+}
+
+template <class DataType, class Arg1Type, class Arg2Type, class Arg3Type,
+          typename SizeType>
+inline typename StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type,
+                               SizeType>::HostMirror
+create_mirror(const StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type,
+                                   SizeType>& view) {
+  // Force copy:
+  // using alloc = Impl::ViewAssignment<Impl::ViewDefault>; // unused
+  using staticcrsgraph_type =
+      StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type, SizeType>;
+
+  typename staticcrsgraph_type::HostMirror tmp;
+  typename staticcrsgraph_type::row_map_type::HostMirror tmp_row_map =
+      create_mirror(view.row_map);
+  typename staticcrsgraph_type::row_block_type::HostMirror
+      tmp_row_block_offsets = create_mirror(view.row_block_offsets);
+
+  // Allocation to match:
+  tmp.row_map = tmp_row_map;  // Assignment of 'const' from 'non-const'
+  tmp.entries = create_mirror(view.entries);
+  tmp.row_block_offsets =
+      tmp_row_block_offsets;  // Assignment of 'const' from 'non-const'
+
+  // Deep copy:
+  deep_copy(tmp_row_map, view.row_map);
+  deep_copy(tmp.entries, view.entries);
+  deep_copy(tmp_row_block_offsets, view.row_block_offsets);
+
+  return tmp;
+}
+
+template <class DataType, class Arg1Type, class Arg2Type, class Arg3Type,
+          typename SizeType>
+inline typename StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type,
+                               SizeType>::HostMirror
+create_mirror_view(const StaticCrsGraph<DataType, Arg1Type, Arg2Type, Arg3Type,
+                                        SizeType>& view,
+                   std::enable_if_t<!ViewTraits<DataType, Arg1Type, Arg2Type,
+                                                Arg3Type>::is_hostspace>* = 0) {
+  return create_mirror(view);
+}
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+template <class StaticCrsGraphType, class InputSizeType>
+inline typename StaticCrsGraphType::staticcrsgraph_type create_staticcrsgraph(
+    const std::string& label, const std::vector<InputSizeType>& input) {
+  using output_type  = StaticCrsGraphType;
+  using entries_type = typename output_type::entries_type;
+  using work_type    = View<
+      typename output_type::size_type[], typename output_type::array_layout,
+      typename output_type::device_type, typename output_type::memory_traits>;
+
+  output_type output;
+
+  // Create the row map:
+
+  const size_t length = input.size();
+
+  {
+    work_type row_work("tmp", length + 1);
+
+    typename work_type::HostMirror row_work_host = create_mirror_view(row_work);
+
+    size_t sum       = 0;
+    row_work_host[0] = 0;
+    for (size_t i = 0; i < length; ++i) {
+      row_work_host[i + 1] = sum += input[i];
+    }
+
+    deep_copy(row_work, row_work_host);
+
+    output.entries = entries_type(label, sum);
+    output.row_map = row_work;
+  }
+
+  return output;
+}
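+
+// A construction sketch (illustrative; the graph type is an assumption):
+// three rows with 2, 0, and 1 entries yield row_map {0, 2, 2, 3}; the
+// entries view is allocated but left for the caller to fill.
+//
+//   std::vector<int> row_sizes{2, 0, 1};
+//   auto graph = Kokkos::create_staticcrsgraph<
+//       Kokkos::StaticCrsGraph<int, Kokkos::DefaultExecutionSpace>>(
+//       "my_graph", row_sizes);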
+
+//----------------------------------------------------------------------------
+
+template <class StaticCrsGraphType, class InputSizeType>
+inline typename StaticCrsGraphType::staticcrsgraph_type create_staticcrsgraph(
+    const std::string& label,
+    const std::vector<std::vector<InputSizeType> >& input) {
+  using output_type  = StaticCrsGraphType;
+  using entries_type = typename output_type::entries_type;
+
+  static_assert(entries_type::rank == 1, "Graph entries view must be rank one");
+
+  using work_type = View<
+      typename output_type::size_type[], typename output_type::array_layout,
+      typename output_type::device_type, typename output_type::memory_traits>;
+
+  output_type output;
+
+  // Create the row map:
+
+  const size_t length = input.size();
+
+  {
+    work_type row_work("tmp", length + 1);
+
+    typename work_type::HostMirror row_work_host = create_mirror_view(row_work);
+
+    size_t sum       = 0;
+    row_work_host[0] = 0;
+    for (size_t i = 0; i < length; ++i) {
+      row_work_host[i + 1] = sum += input[i].size();
+    }
+
+    deep_copy(row_work, row_work_host);
+
+    output.entries = entries_type(label, sum);
+    output.row_map = row_work;
+  }
+
+  // Fill in the entries:
+  {
+    typename entries_type::HostMirror host_entries =
+        create_mirror_view(output.entries);
+
+    size_t sum = 0;
+    for (size_t i = 0; i < length; ++i) {
+      for (size_t j = 0; j < input[i].size(); ++j, ++sum) {
+        host_entries(sum) = input[i][j];
+      }
+    }
+
+    deep_copy(output.entries, host_entries);
+  }
+
+  return output;
+}
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_IMPL_STATICCRSGRAPH_FACTORY_HPP */
diff --git a/bundled/kokkos-3.7.00/containers/src/impl/Kokkos_UnorderedMap_impl.cpp b/bundled/kokkos-3.7.00/containers/src/impl/Kokkos_UnorderedMap_impl.cpp
new file mode 100644 (file)
index 0000000..fc86199
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_UnorderedMap.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+uint32_t find_hash_size(uint32_t size) {
+  if (size == 0u) return 0u;
+
+  // these primes try to preserve the randomness of the hash
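+  // e.g. (illustrative): find_hash_size(1000) returns 1543, the smallest
+  // prime in the table below that is >= 1000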
+  static const uint32_t primes[] = {
+      3,         7,         13,        23,        53,        97,
+      193,       389,       769,       1543,      2237,      2423,
+      2617,      2797,      2999,      3167,      3359,      3539,
+      3727,      3911,      4441,      4787,      5119,      5471,
+      5801,      6143,      6521,      6827,      7177,      7517,
+      7853,      8887,      9587,      10243,     10937,     11617,
+      12289,     12967,     13649,     14341,     15013,     15727,
+      17749,     19121,     20479,     21859,     23209,     24593,
+      25939,     27329,     28669,     30047,     31469,     35507,
+      38231,     40961,     43711,     46439,     49157,     51893,
+      54617,     57347,     60077,     62801,     70583,     75619,
+      80669,     85703,     90749,     95783,     100823,    105871,
+      110909,    115963,    120997,    126031,    141157,    151237,
+      161323,    171401,    181499,    191579,    201653,    211741,
+      221813,    231893,    241979,    252079,    282311,    302483,
+      322649,    342803,    362969,    383143,    403301,    423457,
+      443629,    463787,    483953,    504121,    564617,    604949,
+      645313,    685609,    725939,    766273,    806609,    846931,
+      887261,    927587,    967919,    1008239,   1123477,   1198397,
+      1273289,   1348177,   1423067,   1497983,   1572869,   1647761,
+      1722667,   1797581,   1872461,   1947359,   2022253,   2246953,
+      2396759,   2546543,   2696363,   2846161,   2995973,   3145739,
+      3295541,   3445357,   3595117,   3744941,   3894707,   4044503,
+      4493921,   4793501,   5093089,   5392679,   5692279,   5991883,
+      6291469,   6591059,   6890641,   7190243,   7489829,   7789447,
+      8089033,   8987807,   9586981,   10186177,  10785371,  11384539,
+      11983729,  12582917,  13182109,  13781291,  14380469,  14979667,
+      15578861,  16178053,  17895707,  19014187,  20132683,  21251141,
+      22369661,  23488103,  24606583,  25725083,  26843549,  27962027,
+      29080529,  30198989,  31317469,  32435981,  35791397,  38028379,
+      40265327,  42502283,  44739259,  46976221,  49213237,  51450131,
+      53687099,  55924061,  58161041,  60397993,  62634959,  64871921,
+      71582857,  76056727,  80530643,  85004567,  89478503,  93952427,
+      98426347,  102900263, 107374217, 111848111, 116322053, 120795971,
+      125269877, 129743807, 143165587, 152113427, 161061283, 170009141,
+      178956983, 187904819, 196852693, 205800547, 214748383, 223696237,
+      232644089, 241591943, 250539763, 259487603, 268435399};
+
+  const uint32_t num_primes = sizeof(primes) / sizeof(uint32_t);
+
+  uint32_t hsize = primes[num_primes - 1];
+  for (uint32_t i = 0; i < num_primes; ++i) {
+    if (size <= primes[i]) {
+      hsize = primes[i];
+      break;
+    }
+  }
+  return hsize;
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/containers/src/impl/Kokkos_UnorderedMap_impl.hpp b/bundled/kokkos-3.7.00/containers/src/impl/Kokkos_UnorderedMap_impl.hpp
new file mode 100644 (file)
index 0000000..5acba24
--- /dev/null
@@ -0,0 +1,287 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_UNORDERED_MAP_IMPL_HPP
+#define KOKKOS_UNORDERED_MAP_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include <cstdint>
+
+#include <cstdio>
+#include <climits>
+#include <iostream>
+#include <iomanip>
+
+namespace Kokkos {
+namespace Impl {
+
+uint32_t find_hash_size(uint32_t size);
+
+template <typename Map>
+struct UnorderedMapRehash {
+  using map_type        = Map;
+  using const_map_type  = typename map_type::const_map_type;
+  using execution_space = typename map_type::execution_space;
+  using size_type       = typename map_type::size_type;
+
+  map_type m_dst;
+  const_map_type m_src;
+
+  UnorderedMapRehash(map_type const& dst, const_map_type const& src)
+      : m_dst(dst), m_src(src) {}
+
+  void apply() const {
+    parallel_for("Kokkos::Impl::UnorderedMapRehash::apply", m_src.capacity(),
+                 *this);
+  }
+
+  template <typename Dummy = typename map_type::value_type>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_void<Dummy>::value>
+  operator()(size_type i) const {
+    if (m_src.valid_at(i)) m_dst.insert(m_src.key_at(i));
+  }
+
+  template <typename Dummy = typename map_type::value_type>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<!std::is_void<Dummy>::value>
+  operator()(size_type i) const {
+    if (m_src.valid_at(i)) m_dst.insert(m_src.key_at(i), m_src.value_at(i));
+  }
+};
+
+template <typename UMap>
+struct UnorderedMapErase {
+  using map_type        = UMap;
+  using execution_space = typename map_type::execution_space;
+  using size_type       = typename map_type::size_type;
+  using key_type        = typename map_type::key_type;
+  using value_type      = typename map_type::impl_value_type;
+
+  map_type m_map;
+
+  UnorderedMapErase(map_type const& map) : m_map(map) {}
+
+  void apply() const {
+    parallel_for("Kokkos::Impl::UnorderedMapErase::apply",
+                 m_map.m_hash_lists.extent(0), *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(size_type i) const {
+    const size_type invalid_index = map_type::invalid_index;
+
+    size_type curr = m_map.m_hash_lists(i);
+    size_type next = invalid_index;
+
+    // remove erased head of the linked-list
+    while (curr != invalid_index && !m_map.valid_at(curr)) {
+      next                     = m_map.m_next_index[curr];
+      m_map.m_next_index[curr] = invalid_index;
+      m_map.m_keys[curr]       = key_type();
+      if (m_map.is_set) m_map.m_values[curr] = value_type();
+      curr                  = next;
+      m_map.m_hash_lists(i) = next;
+    }
+
+    // if the list is non-empty and the head is valid
+    if (curr != invalid_index && m_map.valid_at(curr)) {
+      size_type prev = curr;
+      curr           = m_map.m_next_index[prev];
+
+      while (curr != invalid_index) {
+        next = m_map.m_next_index[curr];
+        if (m_map.valid_at(curr)) {
+          prev = curr;
+        } else {
+          // remove curr from list
+          m_map.m_next_index[prev] = next;
+          m_map.m_next_index[curr] = invalid_index;
+          m_map.m_keys[curr]       = key_type();
+          if (map_type::is_set) m_map.m_values[curr] = value_type();
+        }
+        curr = next;
+      }
+    }
+  }
+};
+
+template <typename UMap>
+struct UnorderedMapHistogram {
+  using map_type        = UMap;
+  using execution_space = typename map_type::execution_space;
+  using size_type       = typename map_type::size_type;
+
+  using histogram_view      = View<int[100], typename map_type::device_type>;
+  using host_histogram_view = typename histogram_view::HostMirror;
+
+  map_type m_map;
+  histogram_view m_length;
+  histogram_view m_distance;
+  histogram_view m_block_distance;
+
+  UnorderedMapHistogram(map_type const& map)
+      : m_map(map),
+        m_length("UnorderedMap Histogram"),
+        m_distance("UnorderedMap Histogram"),
+        m_block_distance("UnorderedMap Histogram") {}
+
+  void calculate() {
+    parallel_for("Kokkos::Impl::UnorderedMapHistogram::calculate",
+                 m_map.m_hash_lists.extent(0), *this);
+  }
+
+  void clear() {
+    Kokkos::deep_copy(m_length, 0);
+    Kokkos::deep_copy(m_distance, 0);
+    Kokkos::deep_copy(m_block_distance, 0);
+  }
+
+  void print_length(std::ostream& out) {
+    host_histogram_view host_copy =
+        create_mirror_view_and_copy(Kokkos::HostSpace{}, m_length);
+
+    for (int i = 0, size = host_copy.extent(0); i < size; ++i) {
+      out << host_copy[i] << " , ";
+    }
+    out << "\b\b\b   " << std::endl;
+  }
+
+  void print_distance(std::ostream& out) {
+    host_histogram_view host_copy =
+        create_mirror_view_and_copy(Kokkos::HostSpace{}, m_distance);
+
+    for (int i = 0, size = host_copy.extent(0); i < size; ++i) {
+      out << host_copy[i] << " , ";
+    }
+    out << "\b\b\b   " << std::endl;
+  }
+
+  void print_block_distance(std::ostream& out) {
+    host_histogram_view host_copy =
+        create_mirror_view_and_copy(Kokkos::HostSpace{}, m_block_distance);
+
+    for (int i = 0, size = host_copy.extent(0); i < size; ++i) {
+      out << host_copy[i] << " , ";
+    }
+    out << "\b\b\b   " << std::endl;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(size_type i) const {
+    const size_type invalid_index = map_type::invalid_index;
+
+    uint32_t length     = 0;
+    size_type min_index = ~0u, max_index = 0;
+    for (size_type curr = m_map.m_hash_lists(i); curr != invalid_index;
+         curr           = m_map.m_next_index[curr]) {
+      ++length;
+      min_index = (curr < min_index) ? curr : min_index;
+      max_index = (max_index < curr) ? curr : max_index;
+    }
+
+    size_type distance = (0u < length) ? max_index - min_index : 0u;
+    size_type blocks   = (0u < length) ? max_index / 32u - min_index / 32u : 0u;
+
+    // normalize data
+    length   = length < 100u ? length : 99u;
+    distance = distance < 100u ? distance : 99u;
+    blocks   = blocks < 100u ? blocks : 99u;
+
+    if (0u < length) {
+      atomic_fetch_add(&m_length(length), 1);
+      atomic_fetch_add(&m_distance(distance), 1);
+      atomic_fetch_add(&m_block_distance(blocks), 1);
+    }
+  }
+};
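+
+// Usage sketch (illustrative only; `map` is a hypothetical UnorderedMap):
+// each statistic is clamped to [0, 99] and accumulated atomically, so every
+// print_* method emits exactly 100 comma-separated bucket counts.
+//
+//   UnorderedMapHistogram<map_type> histogram(map);
+//   histogram.calculate();
+//   histogram.print_length(std::cout);   // distribution of chain lengths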
+
+template <typename UMap>
+struct UnorderedMapPrint {
+  using map_type        = UMap;
+  using execution_space = typename map_type::execution_space;
+  using size_type       = typename map_type::size_type;
+
+  map_type m_map;
+
+  UnorderedMapPrint(map_type const& map) : m_map(map) {}
+
+  void apply() {
+    parallel_for("Kokkos::Impl::UnorderedMapPrint::apply",
+                 m_map.m_hash_lists.extent(0), *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(size_type i) const {
+    const size_type invalid_index = map_type::invalid_index;
+
+    uint32_t list = m_map.m_hash_lists(i);
+    for (size_type curr = list, ii = 0; curr != invalid_index;
+         curr = m_map.m_next_index[curr], ++ii) {
+      KOKKOS_IMPL_DO_NOT_USE_PRINTF("%d[%d]: %d->%d\n", list, ii,
+                                    m_map.key_at(curr), m_map.value_at(curr));
+    }
+  }
+};
+
+template <typename DKey, typename DValue, typename SKey, typename SValue>
+struct UnorderedMapCanAssign : public std::false_type {};
+
+template <typename Key, typename Value>
+struct UnorderedMapCanAssign<Key, Value, Key, Value> : public std::true_type {};
+
+template <typename Key, typename Value>
+struct UnorderedMapCanAssign<const Key, Value, Key, Value>
+    : public std::true_type {};
+
+template <typename Key, typename Value>
+struct UnorderedMapCanAssign<const Key, const Value, Key, Value>
+    : public std::true_type {};
+
+template <typename Key, typename Value>
+struct UnorderedMapCanAssign<const Key, const Value, const Key, Value>
+    : public std::true_type {};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_UNORDERED_MAP_IMPL_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_CudaSpace.cpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_CudaSpace.cpp
new file mode 100644 (file)
index 0000000..b2161bc
--- /dev/null
@@ -0,0 +1,716 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+
+#include <Kokkos_Core.hpp>
+#include <Kokkos_Cuda.hpp>
+#include <Kokkos_CudaSpace.hpp>
+
+#include <cstdlib>
+#include <iostream>
+#include <sstream>
+#include <algorithm>
+#include <atomic>
+
+//#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_MemorySpace.hpp>
+
+#include <impl/Kokkos_Tools.hpp>
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+cudaStream_t Kokkos::Impl::cuda_get_deep_copy_stream() {
+  static cudaStream_t s = nullptr;
+  if (s == nullptr) {
+    cudaStreamCreate(&s);
+  }
+  return s;
+}
+
+const std::unique_ptr<Kokkos::Cuda> &Kokkos::Impl::cuda_get_deep_copy_space(
+    bool initialize) {
+  static std::unique_ptr<Cuda> space = nullptr;
+  if (!space && initialize)
+    space = std::make_unique<Cuda>(Kokkos::Impl::cuda_get_deep_copy_stream());
+  return space;
+}
+
+namespace Kokkos {
+namespace Impl {
+
+namespace {
+
+static std::atomic<int> num_uvm_allocations(0);
+
+}  // namespace
+
+void DeepCopyCuda(void *dst, const void *src, size_t n) {
+  KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemcpy(dst, src, n, cudaMemcpyDefault));
+}
+
+void DeepCopyAsyncCuda(const Cuda &instance, void *dst, const void *src,
+                       size_t n) {
+  KOKKOS_IMPL_CUDA_SAFE_CALL(
+      cudaMemcpyAsync(dst, src, n, cudaMemcpyDefault, instance.cuda_stream()));
+}
+
+void DeepCopyAsyncCuda(void *dst, const void *src, size_t n) {
+  cudaStream_t s = cuda_get_deep_copy_stream();
+  KOKKOS_IMPL_CUDA_SAFE_CALL(
+      cudaMemcpyAsync(dst, src, n, cudaMemcpyDefault, s));
+  Impl::cuda_stream_synchronize(
+      s,
+      Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+          DeepCopyResourceSynchronization,
+      "Kokkos::Impl::DeepCopyAsyncCuda: Deep Copy Stream Sync");
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+KOKKOS_DEPRECATED void CudaSpace::access_error() {
+  const std::string msg(
+      "Kokkos::CudaSpace::access_error attempt to execute Cuda function from "
+      "non-Cuda space");
+  Kokkos::Impl::throw_runtime_exception(msg);
+}
+
+KOKKOS_DEPRECATED void CudaSpace::access_error(const void *const) {
+  const std::string msg(
+      "Kokkos::CudaSpace::access_error attempt to execute Cuda function from "
+      "non-Cuda space");
+  Kokkos::Impl::throw_runtime_exception(msg);
+}
+#endif
+
+/*--------------------------------------------------------------------------*/
+
+bool CudaUVMSpace::available() {
+#if defined(CUDA_VERSION) && !defined(__APPLE__)
+  enum : bool { UVM_available = true };
+#else
+  enum : bool { UVM_available = false };
+#endif
+  return UVM_available;
+}
+
+/*--------------------------------------------------------------------------*/
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+int CudaUVMSpace::number_of_allocations() {
+  return Kokkos::Impl::num_uvm_allocations.load();
+}
+#endif
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
+// The purpose of the following variable is to allow a state-based choice
+// for pinning UVM allocations to the CPU. For now this is considered
+// an experimental debugging capability - with the potential to work around
+// some CUDA issues.
+bool CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v = false;
+
+bool CudaUVMSpace::cuda_pin_uvm_to_host() {
+  return CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v;
+}
+void CudaUVMSpace::cuda_set_pin_uvm_to_host(bool val) {
+  CudaUVMSpace::kokkos_impl_cuda_pin_uvm_to_host_v = val;
+}
+#endif
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
+bool kokkos_impl_cuda_pin_uvm_to_host() {
+  return Kokkos::CudaUVMSpace::cuda_pin_uvm_to_host();
+}
+
+void kokkos_impl_cuda_set_pin_uvm_to_host(bool val) {
+  Kokkos::CudaUVMSpace::cuda_set_pin_uvm_to_host(val);
+}
+#endif
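+
+// Usage sketch (illustrative only; requires building with
+// KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST): toggling the flag before an
+// allocation advises CUDA to keep the UVM pages resident on the host.
+//
+//   kokkos_impl_cuda_set_pin_uvm_to_host(true);
+//   // ... CudaUVMSpace allocations made here prefer host residency ...
+//   kokkos_impl_cuda_set_pin_uvm_to_host(false);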
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+CudaSpace::CudaSpace() : m_device(Kokkos::Cuda().cuda_device()) {}
+
+CudaUVMSpace::CudaUVMSpace() : m_device(Kokkos::Cuda().cuda_device()) {}
+
+CudaHostPinnedSpace::CudaHostPinnedSpace() {}
+
+int memory_threshold_g = 40000;  // 40 kB
+
+//==============================================================================
+// <editor-fold desc="allocate()"> {{{1
+
+void *CudaSpace::allocate(const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+
+void *CudaSpace::allocate(const Cuda &exec_space, const char *arg_label,
+                          const size_t arg_alloc_size,
+                          const size_t arg_logical_size) const {
+  return impl_allocate(exec_space, arg_label, arg_alloc_size, arg_logical_size);
+}
+void *CudaSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
+                          const size_t arg_logical_size) const {
+  return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+
+namespace {
+void *impl_allocate_common(const Cuda &exec_space, const char *arg_label,
+                           const size_t arg_alloc_size,
+                           const size_t arg_logical_size,
+                           const Kokkos::Tools::SpaceHandle arg_handle,
+                           bool exec_space_provided) {
+  void *ptr = nullptr;
+
+#ifndef CUDART_VERSION
+#error CUDART_VERSION undefined!
+#elif (defined(KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC) && CUDART_VERSION >= 11020)
+  cudaError_t error_code;
+  if (arg_alloc_size >= memory_threshold_g) {
+    if (exec_space_provided) {
+      cudaStream_t stream = exec_space.cuda_stream();
+      error_code          = cudaMallocAsync(&ptr, arg_alloc_size, stream);
+      KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamSynchronize(stream));
+    } else {
+      error_code = cudaMallocAsync(&ptr, arg_alloc_size, 0);
+      KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
+    }
+  } else {
+    error_code = cudaMalloc(&ptr, arg_alloc_size);
+  }
+#else
+  (void)exec_space;
+  (void)exec_space_provided;
+  auto error_code = cudaMalloc(&ptr, arg_alloc_size);
+#endif
+  if (error_code != cudaSuccess) {  // TODO tag as unlikely branch
+    cudaGetLastError();  // This is the only way to clear the last error,
+                         // which we should do here since we are turning it
+                         // into an exception
+    throw Experimental::CudaRawMemoryAllocationFailure(
+        arg_alloc_size, error_code,
+        Experimental::RawMemoryAllocationFailure::AllocationMechanism::
+            CudaMalloc);
+  }
+
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+  }
+  return ptr;
+}
+}  // namespace
+
+void *CudaSpace::impl_allocate(
+    const char *arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  return impl_allocate_common(Kokkos::Cuda{}, arg_label, arg_alloc_size,
+                              arg_logical_size, arg_handle, false);
+}
+
+void *CudaSpace::impl_allocate(
+    const Cuda &exec_space, const char *arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  return impl_allocate_common(exec_space, arg_label, arg_alloc_size,
+                              arg_logical_size, arg_handle, true);
+}
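+
+// Usage sketch (illustrative only): both public overloads funnel into
+// impl_allocate_common() above. Passing an execution space instance routes
+// allocations at or above memory_threshold_g through cudaMallocAsync on that
+// instance's stream when KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC is enabled.
+//
+//   Kokkos::Cuda exec;
+//   Kokkos::CudaSpace space;
+//   void *p = space.allocate(exec, "my_buffer", 1 << 20, /*logical*/ 0);
+//   space.deallocate("my_buffer", p, 1 << 20, /*logical*/ 0);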
+
+void *CudaUVMSpace::allocate(const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+void *CudaUVMSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
+                             const size_t arg_logical_size) const {
+  return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+void *CudaUVMSpace::impl_allocate(
+    const char *arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  void *ptr = nullptr;
+
+  Cuda::impl_static_fence(
+      "Kokkos::CudaUVMSpace::impl_allocate: Pre UVM Allocation");
+  if (arg_alloc_size > 0) {
+    Kokkos::Impl::num_uvm_allocations++;
+
+    auto error_code =
+        cudaMallocManaged(&ptr, arg_alloc_size, cudaMemAttachGlobal);
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
+    if (Kokkos::CudaUVMSpace::cuda_pin_uvm_to_host())
+      cudaMemAdvise(ptr, arg_alloc_size, cudaMemAdviseSetPreferredLocation,
+                    cudaCpuDeviceId);
+#endif
+
+    if (error_code != cudaSuccess) {  // TODO tag as unlikely branch
+      cudaGetLastError();  // This is the only way to clear the last error,
+                           // which we should do here since we are turning
+                           // it into an exception
+      throw Experimental::CudaRawMemoryAllocationFailure(
+          arg_alloc_size, error_code,
+          Experimental::RawMemoryAllocationFailure::AllocationMechanism::
+              CudaMallocManaged);
+    }
+  }
+  Cuda::impl_static_fence(
+      "Kokkos::CudaUVMSpace::impl_allocate: Post UVM Allocation");
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+  }
+  return ptr;
+}
+void *CudaHostPinnedSpace::allocate(const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+void *CudaHostPinnedSpace::allocate(const char *arg_label,
+                                    const size_t arg_alloc_size,
+                                    const size_t arg_logical_size) const {
+  return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+void *CudaHostPinnedSpace::impl_allocate(
+    const char *arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  void *ptr = nullptr;
+
+  auto error_code = cudaHostAlloc(&ptr, arg_alloc_size, cudaHostAllocDefault);
+  if (error_code != cudaSuccess) {  // TODO tag as unlikely branch
+    cudaGetLastError();  // This is the only way to clear the last error,
+                         // which we should do here since we are turning it
+                         // into an exception
+    throw Experimental::CudaRawMemoryAllocationFailure(
+        arg_alloc_size, error_code,
+        Experimental::RawMemoryAllocationFailure::AllocationMechanism::
+            CudaHostAlloc);
+  }
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+  }
+  return ptr;
+}
+
+// </editor-fold> end allocate() }}}1
+//==============================================================================
+void CudaSpace::deallocate(void *const arg_alloc_ptr,
+                           const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+void CudaSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
+                           const size_t arg_alloc_size,
+                           const size_t arg_logical_size) const {
+  impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void CudaSpace::impl_deallocate(
+    const char *arg_label, void *const arg_alloc_ptr,
+    const size_t arg_alloc_size, const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+                                      reported_size);
+  }
+  try {
+#ifndef CUDART_VERSION
+#error CUDART_VERSION undefined!
+#elif (defined(KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC) && CUDART_VERSION >= 11020)
+    if (arg_alloc_size >= memory_threshold_g) {
+      KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
+      KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFreeAsync(arg_alloc_ptr, 0));
+      KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
+    } else {
+      KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
+    }
+#else
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
+#endif
+  } catch (...) {
+  }
+}
+void CudaUVMSpace::deallocate(void *const arg_alloc_ptr,
+                              const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void CudaUVMSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
+                              const size_t arg_alloc_size,
+                              const size_t arg_logical_size) const {
+  impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void CudaUVMSpace::impl_deallocate(
+    const char *arg_label, void *const arg_alloc_ptr,
+    const size_t arg_alloc_size, const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  Cuda::impl_static_fence(
+      "Kokkos::CudaUVMSpace::impl_deallocate: Pre UVM Deallocation");
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+                                      reported_size);
+  }
+  try {
+    if (arg_alloc_ptr != nullptr) {
+      Kokkos::Impl::num_uvm_allocations--;
+      KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(arg_alloc_ptr));
+    }
+  } catch (...) {
+  }
+  Cuda::impl_static_fence(
+      "Kokkos::CudaUVMSpace::impl_deallocate: Post UVM Deallocation");
+}
+
+void CudaHostPinnedSpace::deallocate(void *const arg_alloc_ptr,
+                                     const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+void CudaHostPinnedSpace::deallocate(const char *arg_label,
+                                     void *const arg_alloc_ptr,
+                                     const size_t arg_alloc_size,
+                                     const size_t arg_logical_size) const {
+  impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+
+void CudaHostPinnedSpace::impl_deallocate(
+    const char *arg_label, void *const arg_alloc_ptr,
+    const size_t arg_alloc_size, const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+                                      reported_size);
+  }
+  try {
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFreeHost(arg_alloc_ptr));
+  } catch (...) {
+  }
+}
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+#ifdef KOKKOS_ENABLE_DEBUG
+SharedAllocationRecord<void, void>
+    SharedAllocationRecord<Kokkos::CudaSpace, void>::s_root_record;
+
+SharedAllocationRecord<void, void>
+    SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::s_root_record;
+
+SharedAllocationRecord<void, void>
+    SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::s_root_record;
+#endif
+
+::cudaTextureObject_t
+SharedAllocationRecord<Kokkos::CudaSpace, void>::attach_texture_object(
+    const unsigned sizeof_alias, void *const alloc_ptr,
+    size_t const alloc_size) {
+  enum { TEXTURE_BOUND_1D = 1u << 27 };
+
+  if ((alloc_ptr == nullptr) ||
+      (sizeof_alias * TEXTURE_BOUND_1D <= alloc_size)) {
+    std::ostringstream msg;
+    msg << "Kokkos::CudaSpace ERROR: Cannot attach texture object to"
+        << " alloc_ptr(" << alloc_ptr << ")"
+        << " alloc_size(" << alloc_size << ")"
+        << " max_size(" << (sizeof_alias * TEXTURE_BOUND_1D) << ")";
+    std::cerr << msg.str() << std::endl;
+    std::cerr.flush();
+    Kokkos::Impl::throw_runtime_exception(msg.str());
+  }
+
+  ::cudaTextureObject_t tex_obj;
+
+  struct cudaResourceDesc resDesc;
+  struct cudaTextureDesc texDesc;
+
+  memset(&resDesc, 0, sizeof(resDesc));
+  memset(&texDesc, 0, sizeof(texDesc));
+
+  resDesc.resType = cudaResourceTypeLinear;
+  resDesc.res.linear.desc =
+      (sizeof_alias == 4
+           ? cudaCreateChannelDesc<int>()
+           : (sizeof_alias == 8
+                  ? cudaCreateChannelDesc< ::int2>()
+                  :
+                  /* sizeof_alias == 16 */ cudaCreateChannelDesc< ::int4>()));
+  resDesc.res.linear.sizeInBytes = alloc_size;
+  resDesc.res.linear.devPtr      = alloc_ptr;
+
+  KOKKOS_IMPL_CUDA_SAFE_CALL(
+      cudaCreateTextureObject(&tex_obj, &resDesc, &texDesc, nullptr));
+
+  return tex_obj;
+}
+
+//==============================================================================
+// <editor-fold desc="SharedAllocationRecord destructors"> {{{1
+
+SharedAllocationRecord<Kokkos::CudaSpace, void>::~SharedAllocationRecord() {
+  auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     alloc_size, (alloc_size - sizeof(SharedAllocationHeader)));
+}
+
+void SharedAllocationRecord<Kokkos::CudaSpace, void>::deep_copy_header_no_exec(
+    void *ptr, const void *header) {
+  Kokkos::Cuda exec;
+  Kokkos::Impl::DeepCopy<CudaSpace, HostSpace>(exec, ptr, header,
+                                               sizeof(SharedAllocationHeader));
+  exec.fence(
+      "SharedAllocationRecord<Kokkos::CudaSpace, "
+      "void>::SharedAllocationRecord(): fence after copying header from "
+      "HostSpace");
+}
+
+SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::~SharedAllocationRecord() {
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     SharedAllocationRecord<void, void>::m_alloc_size,
+                     (SharedAllocationRecord<void, void>::m_alloc_size -
+                      sizeof(SharedAllocationHeader)));
+}
+
+SharedAllocationRecord<Kokkos::CudaHostPinnedSpace,
+                       void>::~SharedAllocationRecord() {
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     SharedAllocationRecord<void, void>::m_alloc_size,
+                     (SharedAllocationRecord<void, void>::m_alloc_size -
+                      sizeof(SharedAllocationHeader)));
+}
+
+// </editor-fold> end SharedAllocationRecord destructors }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="SharedAllocationRecord constructors"> {{{1
+
+SharedAllocationRecord<Kokkos::CudaSpace, void>::SharedAllocationRecord(
+    const Kokkos::CudaSpace &arg_space, const std::string &arg_label,
+    const size_t arg_alloc_size,
+    const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::CudaSpace, void>::s_root_record,
+#endif
+          Impl::checked_allocation_with_header(arg_space, arg_label,
+                                               arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_tex_obj(0),
+      m_space(arg_space) {
+
+  SharedAllocationHeader header;
+
+  this->base_t::_fill_host_accessible_header_info(header, arg_label);
+
+  // Copy to device memory
+  Kokkos::Cuda exec;
+  Kokkos::Impl::DeepCopy<CudaSpace, HostSpace>(
+      exec, RecordBase::m_alloc_ptr, &header, sizeof(SharedAllocationHeader));
+  exec.fence(
+      "SharedAllocationRecord<Kokkos::CudaSpace, "
+      "void>::SharedAllocationRecord(): fence after copying header from "
+      "HostSpace");
+}
+
+SharedAllocationRecord<Kokkos::CudaSpace, void>::SharedAllocationRecord(
+    const Kokkos::Cuda &arg_exec_space, const Kokkos::CudaSpace &arg_space,
+    const std::string &arg_label, const size_t arg_alloc_size,
+    const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::CudaSpace, void>::s_root_record,
+#endif
+          Impl::checked_allocation_with_header(arg_exec_space, arg_space,
+                                               arg_label, arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_tex_obj(0),
+      m_space(arg_space) {
+
+  SharedAllocationHeader header;
+
+  this->base_t::_fill_host_accessible_header_info(header, arg_label);
+
+  // Copy to device memory
+  Kokkos::Impl::DeepCopy<CudaSpace, HostSpace>(arg_exec_space,
+                                               RecordBase::m_alloc_ptr, &header,
+                                               sizeof(SharedAllocationHeader));
+}
+
+SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::SharedAllocationRecord(
+    const Kokkos::CudaUVMSpace &arg_space, const std::string &arg_label,
+    const size_t arg_alloc_size,
+    const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::s_root_record,
+#endif
+          Impl::checked_allocation_with_header(arg_space, arg_label,
+                                               arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_tex_obj(0),
+      m_space(arg_space) {
+  this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
+                                                  arg_label);
+}
+
+SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::CudaHostPinnedSpace &arg_space,
+        const std::string &arg_label, const size_t arg_alloc_size,
+        const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::CudaHostPinnedSpace,
+                                  void>::s_root_record,
+#endif
+          Impl::checked_allocation_with_header(arg_space, arg_label,
+                                               arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+  this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
+                                                  arg_label);
+}
+
+// </editor-fold> end SharedAllocationRecord constructors }}}1
+//==============================================================================
+
+void cuda_prefetch_pointer(const Cuda &space, const void *ptr, size_t bytes,
+                           bool to_device) {
+  if ((ptr == nullptr) || (bytes == 0)) return;
+  cudaPointerAttributes attr;
+  KOKKOS_IMPL_CUDA_SAFE_CALL(cudaPointerGetAttributes(&attr, ptr));
+  // I measured this and it turns out prefetching towards the host slows
+  // DualView syncs down, probably because the latency of the pull down is
+  // not too bad in the first place. If we want to change that, provide
+  // cudaCpuDeviceId as the device when to_device is false.
+#if CUDA_VERSION < 10000
+  bool is_managed = attr.isManaged;
+#else
+  bool is_managed = attr.type == cudaMemoryTypeManaged;
+#endif
+  if (to_device && is_managed &&
+      space.cuda_device_prop().concurrentManagedAccess) {
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemPrefetchAsync(
+        ptr, bytes, space.cuda_device(), space.cuda_stream()));
+  }
+}
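+
+// Usage sketch (illustrative only; `exec`, `ptr`, and `nbytes` are
+// hypothetical): prefetch a managed allocation toward the device before
+// launching kernels on the same stream.
+//
+//   Kokkos::Impl::cuda_prefetch_pointer(exec, ptr, nbytes, /*to_device=*/true);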
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//==============================================================================
+// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
+
+#include <impl/Kokkos_SharedAlloc_timpl.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// To avoid additional compilation cost for something that's (mostly?) not
+// performance sensitive, we explicitly instantiate these CRTP base classes here,
+// where we have access to the associated *_timpl.hpp header files.
+template class SharedAllocationRecordCommon<Kokkos::CudaSpace>;
+template class HostInaccessibleSharedAllocationRecordCommon<Kokkos::CudaSpace>;
+template class SharedAllocationRecordCommon<Kokkos::CudaUVMSpace>;
+template class SharedAllocationRecordCommon<Kokkos::CudaHostPinnedSpace>;
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
+//==============================================================================
+
+#else
+void KOKKOS_CORE_SRC_CUDA_CUDASPACE_PREVENT_LINK_ERROR() {}
+#endif  // KOKKOS_ENABLE_CUDA
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Alloc.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Alloc.hpp
new file mode 100644 (file)
index 0000000..e76133f
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_ALLOCATION_TRACKING_HPP
+#define KOKKOS_CUDA_ALLOCATION_TRACKING_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+
+#include <impl/Kokkos_Traits.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class DestructFunctor>
+SharedAllocationRecord* shared_allocation_record(
+    Kokkos::CudaSpace const& arg_space, void* const arg_alloc_ptr,
+    DestructFunctor const& arg_destruct) {
+  SharedAllocationRecord* const record =
+      SharedAllocationRecord::get_record(arg_alloc_ptr);
+
+  // assert: record != 0
+
+  // assert: sizeof(DestructFunctor) <= record->m_destruct_size
+
+  // assert: record->m_destruct_function == 0
+
+  DestructFunctor* const functor = reinterpret_cast<DestructFunctor*>(
+      reinterpret_cast<uintptr_t>(record) + sizeof(SharedAllocationRecord));
+
+  new (functor) DestructFunctor(arg_destruct);
+
+  record->m_destruct_functor = &shared_allocation_destroy<DestructFunctor>;
+
+  return record;
+}
+
+/// class CudaUnmanagedAllocator
+/// does nothing when deallocate(ptr,size) is called
+struct CudaUnmanagedAllocator {
+  static const char* name() { return "Cuda Unmanaged Allocator"; }
+
+  static void deallocate(void* /*ptr*/, size_t /*size*/) {}
+
+  static bool support_texture_binding() { return true; }
+};
+
+/// class CudaUnmanagedUVMAllocator
+/// does nothing when deallocate(ptr,size) is called
+struct CudaUnmanagedUVMAllocator {
+  static const char* name() { return "Cuda Unmanaged UVM Allocator"; }
+
+  static void deallocate(void* /*ptr*/, size_t /*size*/) {}
+
+  static bool support_texture_binding() { return true; }
+};
+
+/// class CudaUnmanagedHostAllocator
+/// does nothing when deallocate(ptr,size) is called
+class CudaUnmanagedHostAllocator {
+ public:
+  static const char* name() { return "Cuda Unmanaged Host Allocator"; }
+  // Unmanaged deallocate does nothing
+  static void deallocate(void* /*ptr*/, size_t /*size*/) {}
+};
+
+/// class CudaMallocAllocator
+class CudaMallocAllocator {
+ public:
+  static const char* name() { return "Cuda Malloc Allocator"; }
+
+  static void* allocate(size_t size);
+
+  static void deallocate(void* ptr, size_t);
+
+  static void* reallocate(void* old_ptr, size_t old_size, size_t new_size);
+
+  static bool support_texture_binding() { return true; }
+};
+
+/// class CudaUVMAllocator
+class CudaUVMAllocator {
+ public:
+  static const char* name() { return "Cuda UVM Allocator"; }
+
+  static void* allocate(size_t size);
+
+  static void deallocate(void* ptr, size_t);
+
+  static void* reallocate(void* old_ptr, size_t old_size, size_t new_size);
+
+  static bool support_texture_binding() { return true; }
+};
+
+/// class CudaHostAllocator
+class CudaHostAllocator {
+ public:
+  static const char* name() { return "Cuda Host Allocator"; }
+
+  static void* allocate(size_t size);
+
+  static void deallocate(void* ptr, size_t);
+
+  static void* reallocate(void* old_ptr, size_t old_size, size_t new_size);
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_ENABLE_CUDA
+
+#endif  // #ifndef KOKKOS_CUDA_ALLOCATION_TRACKING_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Atomic_Intrinsics.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Atomic_Intrinsics.hpp
new file mode 100644 (file)
index 0000000..8e8dff6
--- /dev/null
@@ -0,0 +1,1003 @@
+/*
+@HEADER
+================================================================================
+
+ORIGINAL LICENSE
+----------------
+
+Copyright (c) 2018, NVIDIA Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+================================================================================
+
+LICENSE ASSOCIATED WITH SUBSEQUENT MODIFICATIONS
+------------------------------------------------
+
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//              Copyright (2019) Sandia Corporation
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+@HEADER
+*/
+
+#include <Kokkos_Macros.hpp>
+#if defined(__CUDA_ARCH__) && defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
+
+#include <cassert>
+
+#ifndef _SIMT_DETAILS_CONFIG
+#define _SIMT_DETAILS_CONFIG
+
+namespace Kokkos {
+namespace Impl {
+
+#ifndef __simt_scope
+// Modification: Kokkos GPU atomics should default to `gpu` scope
+#define __simt_scope "gpu"
+#endif
+
+#define __simt_fence_signal_() asm volatile("" ::: "memory")
+#define __simt_fence_sc_() \
+  asm volatile("fence.sc." __simt_scope ";" ::: "memory")
+#define __simt_fence_() asm volatile("fence." __simt_scope ";" ::: "memory")
+
+#define __simt_load_acquire_8_as_32(ptr, ret)             \
+  asm volatile("ld.acquire." __simt_scope ".b8 %0, [%1];" \
+               : "=r"(ret)                                \
+               : "l"(ptr)                                 \
+               : "memory")
+#define __simt_load_relaxed_8_as_32(ptr, ret)             \
+  asm volatile("ld.relaxed." __simt_scope ".b8 %0, [%1];" \
+               : "=r"(ret)                                \
+               : "l"(ptr)                                 \
+               : "memory")
+#define __simt_store_release_8_as_32(ptr, desired)                    \
+  asm volatile("st.release." __simt_scope ".b8 [%0], %1;" ::"l"(ptr), \
+               "r"(desired)                                           \
+               : "memory")
+#define __simt_store_relaxed_8_as_32(ptr, desired)                    \
+  asm volatile("st.relaxed." __simt_scope ".b8 [%0], %1;" ::"l"(ptr), \
+               "r"(desired)                                           \
+               : "memory")
+
+#define __simt_load_acquire_16(ptr, ret)                   \
+  asm volatile("ld.acquire." __simt_scope ".b16 %0, [%1];" \
+               : "=h"(ret)                                 \
+               : "l"(ptr)                                  \
+               : "memory")
+#define __simt_load_relaxed_16(ptr, ret)                   \
+  asm volatile("ld.relaxed." __simt_scope ".b16 %0, [%1];" \
+               : "=h"(ret)                                 \
+               : "l"(ptr)                                  \
+               : "memory")
+#define __simt_store_release_16(ptr, desired)                          \
+  asm volatile("st.release." __simt_scope ".b16 [%0], %1;" ::"l"(ptr), \
+               "h"(desired)                                            \
+               : "memory")
+#define __simt_store_relaxed_16(ptr, desired)                          \
+  asm volatile("st.relaxed." __simt_scope ".b16 [%0], %1;" ::"l"(ptr), \
+               "h"(desired)                                            \
+               : "memory")
+
+#define __simt_load_acquire_32(ptr, ret)                   \
+  asm volatile("ld.acquire." __simt_scope ".b32 %0, [%1];" \
+               : "=r"(ret)                                 \
+               : "l"(ptr)                                  \
+               : "memory")
+#define __simt_load_relaxed_32(ptr, ret)                   \
+  asm volatile("ld.relaxed." __simt_scope ".b32 %0, [%1];" \
+               : "=r"(ret)                                 \
+               : "l"(ptr)                                  \
+               : "memory")
+#define __simt_store_release_32(ptr, desired)                          \
+  asm volatile("st.release." __simt_scope ".b32 [%0], %1;" ::"l"(ptr), \
+               "r"(desired)                                            \
+               : "memory")
+#define __simt_store_relaxed_32(ptr, desired)                          \
+  asm volatile("st.relaxed." __simt_scope ".b32 [%0], %1;" ::"l"(ptr), \
+               "r"(desired)                                            \
+               : "memory")
+#define __simt_exch_release_32(ptr, old, desired)                     \
+  asm volatile("atom.exch.release." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                            \
+               : "l"(ptr), "r"(desired)                               \
+               : "memory")
+#define __simt_exch_acquire_32(ptr, old, desired)                     \
+  asm volatile("atom.exch.acquire." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                            \
+               : "l"(ptr), "r"(desired)                               \
+               : "memory")
+#define __simt_exch_acq_rel_32(ptr, old, desired)                     \
+  asm volatile("atom.exch.acq_rel." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                            \
+               : "l"(ptr), "r"(desired)                               \
+               : "memory")
+#define __simt_exch_relaxed_32(ptr, old, desired)                     \
+  asm volatile("atom.exch.relaxed." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                            \
+               : "l"(ptr), "r"(desired)                               \
+               : "memory")
+#define __simt_cas_release_32(ptr, old, expected, desired)               \
+  asm volatile("atom.cas.release." __simt_scope ".b32 %0, [%1], %2, %3;" \
+               : "=r"(old)                                               \
+               : "l"(ptr), "r"(expected), "r"(desired)                   \
+               : "memory")
+#define __simt_cas_acquire_32(ptr, old, expected, desired)               \
+  asm volatile("atom.cas.acquire." __simt_scope ".b32 %0, [%1], %2, %3;" \
+               : "=r"(old)                                               \
+               : "l"(ptr), "r"(expected), "r"(desired)                   \
+               : "memory")
+#define __simt_cas_acq_rel_32(ptr, old, expected, desired)               \
+  asm volatile("atom.cas.acq_rel." __simt_scope ".b32 %0, [%1], %2, %3;" \
+               : "=r"(old)                                               \
+               : "l"(ptr), "r"(expected), "r"(desired)                   \
+               : "memory")
+#define __simt_cas_relaxed_32(ptr, old, expected, desired)               \
+  asm volatile("atom.cas.relaxed." __simt_scope ".b32 %0, [%1], %2, %3;" \
+               : "=r"(old)                                               \
+               : "l"(ptr), "r"(expected), "r"(desired)                   \
+               : "memory")
+#define __simt_add_release_32(ptr, old, addend)                      \
+  asm volatile("atom.add.release." __simt_scope ".u32 %0, [%1], %2;" \
+               : "=r"(old)                                           \
+               : "l"(ptr), "r"(addend)                               \
+               : "memory")
+#define __simt_add_acquire_32(ptr, old, addend)                      \
+  asm volatile("atom.add.acquire." __simt_scope ".u32 %0, [%1], %2;" \
+               : "=r"(old)                                           \
+               : "l"(ptr), "r"(addend)                               \
+               : "memory")
+#define __simt_add_acq_rel_32(ptr, old, addend)                      \
+  asm volatile("atom.add.acq_rel." __simt_scope ".u32 %0, [%1], %2;" \
+               : "=r"(old)                                           \
+               : "l"(ptr), "r"(addend)                               \
+               : "memory")
+#define __simt_add_relaxed_32(ptr, old, addend)                      \
+  asm volatile("atom.add.relaxed." __simt_scope ".u32 %0, [%1], %2;" \
+               : "=r"(old)                                           \
+               : "l"(ptr), "r"(addend)                               \
+               : "memory")
+#define __simt_and_release_32(ptr, old, andend)                      \
+  asm volatile("atom.and.release." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                           \
+               : "l"(ptr), "r"(andend)                               \
+               : "memory")
+#define __simt_and_acquire_32(ptr, old, andend)                      \
+  asm volatile("atom.and.acquire." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                           \
+               : "l"(ptr), "r"(andend)                               \
+               : "memory")
+#define __simt_and_acq_rel_32(ptr, old, andend)                      \
+  asm volatile("atom.and.acq_rel." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                           \
+               : "l"(ptr), "r"(andend)                               \
+               : "memory")
+#define __simt_and_relaxed_32(ptr, old, andend)                      \
+  asm volatile("atom.and.relaxed." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                           \
+               : "l"(ptr), "r"(andend)                               \
+               : "memory")
+#define __simt_or_release_32(ptr, old, orend)                       \
+  asm volatile("atom.or.release." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                          \
+               : "l"(ptr), "r"(orend)                               \
+               : "memory")
+#define __simt_or_acquire_32(ptr, old, orend)                       \
+  asm volatile("atom.or.acquire." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                          \
+               : "l"(ptr), "r"(orend)                               \
+               : "memory")
+#define __simt_or_acq_rel_32(ptr, old, orend)                       \
+  asm volatile("atom.or.acq_rel." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                          \
+               : "l"(ptr), "r"(orend)                               \
+               : "memory")
+#define __simt_or_relaxed_32(ptr, old, orend)                       \
+  asm volatile("atom.or.relaxed." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                          \
+               : "l"(ptr), "r"(orend)                               \
+               : "memory")
+#define __simt_xor_release_32(ptr, old, xorend)                      \
+  asm volatile("atom.xor.release." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                           \
+               : "l"(ptr), "r"(xorend)                               \
+               : "memory")
+#define __simt_xor_acquire_32(ptr, old, xorend)                      \
+  asm volatile("atom.xor.acquire." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                           \
+               : "l"(ptr), "r"(xorend)                               \
+               : "memory")
+#define __simt_xor_acq_rel_32(ptr, old, xorend)                      \
+  asm volatile("atom.xor.acq_rel." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                           \
+               : "l"(ptr), "r"(xorend)                               \
+               : "memory")
+#define __simt_xor_relaxed_32(ptr, old, xorend)                      \
+  asm volatile("atom.xor.relaxed." __simt_scope ".b32 %0, [%1], %2;" \
+               : "=r"(old)                                           \
+               : "l"(ptr), "r"(xorend)                               \
+               : "memory")
+
+#define __simt_load_acquire_64(ptr, ret)                   \
+  asm volatile("ld.acquire." __simt_scope ".b64 %0, [%1];" \
+               : "=l"(ret)                                 \
+               : "l"(ptr)                                  \
+               : "memory")
+#define __simt_load_relaxed_64(ptr, ret)                   \
+  asm volatile("ld.relaxed." __simt_scope ".b64 %0, [%1];" \
+               : "=l"(ret)                                 \
+               : "l"(ptr)                                  \
+               : "memory")
+#define __simt_store_release_64(ptr, desired)                          \
+  asm volatile("st.release." __simt_scope ".b64 [%0], %1;" ::"l"(ptr), \
+               "l"(desired)                                            \
+               : "memory")
+#define __simt_store_relaxed_64(ptr, desired)                          \
+  asm volatile("st.relaxed." __simt_scope ".b64 [%0], %1;" ::"l"(ptr), \
+               "l"(desired)                                            \
+               : "memory")
+#define __simt_exch_release_64(ptr, old, desired)                     \
+  asm volatile("atom.exch.release." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                            \
+               : "l"(ptr), "l"(desired)                               \
+               : "memory")
+#define __simt_exch_acquire_64(ptr, old, desired)                     \
+  asm volatile("atom.exch.acquire." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                            \
+               : "l"(ptr), "l"(desired)                               \
+               : "memory")
+#define __simt_exch_acq_rel_64(ptr, old, desired)                     \
+  asm volatile("atom.exch.acq_rel." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                            \
+               : "l"(ptr), "l"(desired)                               \
+               : "memory")
+#define __simt_exch_relaxed_64(ptr, old, desired)                     \
+  asm volatile("atom.exch.relaxed." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                            \
+               : "l"(ptr), "l"(desired)                               \
+               : "memory")
+#define __simt_cas_release_64(ptr, old, expected, desired)               \
+  asm volatile("atom.cas.release." __simt_scope ".b64 %0, [%1], %2, %3;" \
+               : "=l"(old)                                               \
+               : "l"(ptr), "l"(expected), "l"(desired)                   \
+               : "memory")
+#define __simt_cas_acquire_64(ptr, old, expected, desired)               \
+  asm volatile("atom.cas.acquire." __simt_scope ".b64 %0, [%1], %2, %3;" \
+               : "=l"(old)                                               \
+               : "l"(ptr), "l"(expected), "l"(desired)                   \
+               : "memory")
+#define __simt_cas_acq_rel_64(ptr, old, expected, desired)               \
+  asm volatile("atom.cas.acq_rel." __simt_scope ".b64 %0, [%1], %2, %3;" \
+               : "=l"(old)                                               \
+               : "l"(ptr), "l"(expected), "l"(desired)                   \
+               : "memory")
+#define __simt_cas_relaxed_64(ptr, old, expected, desired)               \
+  asm volatile("atom.cas.relaxed." __simt_scope ".b64 %0, [%1], %2, %3;" \
+               : "=l"(old)                                               \
+               : "l"(ptr), "l"(expected), "l"(desired)                   \
+               : "memory")
+#define __simt_add_release_64(ptr, old, addend)                      \
+  asm volatile("atom.add.release." __simt_scope ".u64 %0, [%1], %2;" \
+               : "=l"(old)                                           \
+               : "l"(ptr), "l"(addend)                               \
+               : "memory")
+#define __simt_add_acquire_64(ptr, old, addend)                      \
+  asm volatile("atom.add.acquire." __simt_scope ".u64 %0, [%1], %2;" \
+               : "=l"(old)                                           \
+               : "l"(ptr), "l"(addend)                               \
+               : "memory")
+#define __simt_add_acq_rel_64(ptr, old, addend)                      \
+  asm volatile("atom.add.acq_rel." __simt_scope ".u64 %0, [%1], %2;" \
+               : "=l"(old)                                           \
+               : "l"(ptr), "l"(addend)                               \
+               : "memory")
+#define __simt_add_relaxed_64(ptr, old, addend)                      \
+  asm volatile("atom.add.relaxed." __simt_scope ".u64 %0, [%1], %2;" \
+               : "=l"(old)                                           \
+               : "l"(ptr), "l"(addend)                               \
+               : "memory")
+#define __simt_and_release_64(ptr, old, andend)                      \
+  asm volatile("atom.and.release." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                           \
+               : "l"(ptr), "l"(andend)                               \
+               : "memory")
+#define __simt_and_acquire_64(ptr, old, andend)                      \
+  asm volatile("atom.and.acquire." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                           \
+               : "l"(ptr), "l"(andend)                               \
+               : "memory")
+#define __simt_and_acq_rel_64(ptr, old, andend)                      \
+  asm volatile("atom.and.acq_rel." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                           \
+               : "l"(ptr), "l"(andend)                               \
+               : "memory")
+#define __simt_and_relaxed_64(ptr, old, andend)                      \
+  asm volatile("atom.and.relaxed." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                           \
+               : "l"(ptr), "l"(andend)                               \
+               : "memory")
+#define __simt_or_release_64(ptr, old, orend)                       \
+  asm volatile("atom.or.release." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                          \
+               : "l"(ptr), "l"(orend)                               \
+               : "memory")
+#define __simt_or_acquire_64(ptr, old, orend)                       \
+  asm volatile("atom.or.acquire." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                          \
+               : "l"(ptr), "l"(orend)                               \
+               : "memory")
+#define __simt_or_acq_rel_64(ptr, old, orend)                       \
+  asm volatile("atom.or.acq_rel." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                          \
+               : "l"(ptr), "l"(orend)                               \
+               : "memory")
+#define __simt_or_relaxed_64(ptr, old, orend)                       \
+  asm volatile("atom.or.relaxed." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                          \
+               : "l"(ptr), "l"(orend)                               \
+               : "memory")
+#define __simt_xor_release_64(ptr, old, xorend)                      \
+  asm volatile("atom.xor.release." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                           \
+               : "l"(ptr), "l"(xorend)                               \
+               : "memory")
+#define __simt_xor_acquire_64(ptr, old, xorend)                      \
+  asm volatile("atom.xor.acquire." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                           \
+               : "l"(ptr), "l"(xorend)                               \
+               : "memory")
+#define __simt_xor_acq_rel_64(ptr, old, xorend)                      \
+  asm volatile("atom.xor.acq_rel." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                           \
+               : "l"(ptr), "l"(xorend)                               \
+               : "memory")
+#define __simt_xor_relaxed_64(ptr, old, xorend)                      \
+  asm volatile("atom.xor.relaxed." __simt_scope ".b64 %0, [%1], %2;" \
+               : "=l"(old)                                           \
+               : "l"(ptr), "l"(xorend)                               \
+               : "memory")
+
+#define __simt_nanosleep(timeout) \
+  asm volatile("nanosleep.u32 %0;" ::"r"(unsigned(timeout)) :)
+
+/*
+    definitions
+*/
+
+#ifndef __GCC_ATOMIC_BOOL_LOCK_FREE
+#define __GCC_ATOMIC_BOOL_LOCK_FREE 2
+#define __GCC_ATOMIC_CHAR_LOCK_FREE 2
+#define __GCC_ATOMIC_CHAR16_T_LOCK_FREE 2
+#define __GCC_ATOMIC_CHAR32_T_LOCK_FREE 2
+#define __GCC_ATOMIC_WCHAR_T_LOCK_FREE 2
+#define __GCC_ATOMIC_SHORT_LOCK_FREE 2
+#define __GCC_ATOMIC_INT_LOCK_FREE 2
+#define __GCC_ATOMIC_LONG_LOCK_FREE 2
+#define __GCC_ATOMIC_LLONG_LOCK_FREE 2
+#define __GCC_ATOMIC_POINTER_LOCK_FREE 2
+#endif
+
+#ifndef __ATOMIC_RELAXED
+#define __ATOMIC_RELAXED 0
+#define __ATOMIC_CONSUME 1
+#define __ATOMIC_ACQUIRE 2
+#define __ATOMIC_RELEASE 3
+#define __ATOMIC_ACQ_REL 4
+#define __ATOMIC_SEQ_CST 5
+#endif
+
+inline __device__ int __stronger_order_simt_(int a, int b) {
+  if (b == __ATOMIC_SEQ_CST) return __ATOMIC_SEQ_CST;
+  if (b == __ATOMIC_RELAXED) return a;
+  switch (a) {
+    case __ATOMIC_SEQ_CST:
+    case __ATOMIC_ACQ_REL: return a;
+    case __ATOMIC_CONSUME:
+    case __ATOMIC_ACQUIRE:
+      if (b != __ATOMIC_ACQUIRE)
+        return __ATOMIC_ACQ_REL;
+      else
+        return __ATOMIC_ACQUIRE;
+    case __ATOMIC_RELEASE:
+      if (b != __ATOMIC_RELEASE)
+        return __ATOMIC_ACQ_REL;
+      else
+        return __ATOMIC_RELEASE;
+    case __ATOMIC_RELAXED: return b;
+    default: assert(0);
+  }
+  return __ATOMIC_SEQ_CST;
+}
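
As a sketch (not part of the bundled file): __stronger_order_simt_ merges the success and failure orders of a compare-exchange into the single order used for the underlying PTX instruction, returning an order at least as strong as both arguments (sometimes stronger). A few concrete cases:

    // Merging CAS success/failure orders; each result is at least as
    // strong as both inputs.
    __device__ void stronger_order_examples() {
      assert(__stronger_order_simt_(__ATOMIC_RELEASE, __ATOMIC_ACQUIRE) ==
             __ATOMIC_ACQ_REL);
      assert(__stronger_order_simt_(__ATOMIC_ACQUIRE, __ATOMIC_RELAXED) ==
             __ATOMIC_ACQUIRE);
      assert(__stronger_order_simt_(__ATOMIC_RELAXED, __ATOMIC_SEQ_CST) ==
             __ATOMIC_SEQ_CST);
    }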
+
+/*
+    base
+*/
+
+#define DO__atomic_load_simt_(bytes, bits)                                \
+  template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0> \
+  void __device__ __atomic_load_simt_(const type *ptr, type *ret,         \
+                                      int memorder) {                     \
+    int##bits##_t tmp = 0;                                                \
+    switch (memorder) {                                                   \
+      case __ATOMIC_SEQ_CST: __simt_fence_sc_();                          \
+      case __ATOMIC_CONSUME:                                              \
+      case __ATOMIC_ACQUIRE: __simt_load_acquire_##bits(ptr, tmp); break; \
+      case __ATOMIC_RELAXED: __simt_load_relaxed_##bits(ptr, tmp); break; \
+      default: assert(0);                                                 \
+    }                                                                     \
+    memcpy(ret, &tmp, bytes);                                             \
+  }
+DO__atomic_load_simt_(1, 32)
+DO__atomic_load_simt_(2, 16)
+DO__atomic_load_simt_(4, 32)
+DO__atomic_load_simt_(8, 64)
+
+template <class type>
+type __device__ __atomic_load_n_simt_(const type *ptr, int memorder) {
+  type ret;
+  __atomic_load_simt_(ptr, &ret, memorder);
+  return ret;
+}
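
Note the deliberate fallthrough in DO__atomic_load_simt_: __ATOMIC_SEQ_CST first issues __simt_fence_sc_() and then falls through to the acquire load. The memcpy in and out of an integer temporary is what lets arbitrary trivially-copyable types ride on the integer PTX loads. A minimal usage sketch (assumes a device context with this header in effect):

    // Acquire-load a 64-bit value published by another thread.
    __device__ uint64_t read_published(const uint64_t *p) {
      return Kokkos::Impl::__atomic_load_n_simt_(p, __ATOMIC_ACQUIRE);
    }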
+
+#define DO__atomic_store_simt_(bytes, bits)                                  \
+  template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0>    \
+  void __device__ __atomic_store_simt_(type *ptr, type *val, int memorder) { \
+    int##bits##_t tmp = 0;                                                   \
+    memcpy(&tmp, val, bytes);                                                \
+    switch (memorder) {                                                      \
+      case __ATOMIC_RELEASE: __simt_store_release_##bits(ptr, tmp); break;   \
+      case __ATOMIC_SEQ_CST: __simt_fence_sc_();                             \
+      case __ATOMIC_RELAXED: __simt_store_relaxed_##bits(ptr, tmp); break;   \
+      default: assert(0);                                                    \
+    }                                                                        \
+  }
+DO__atomic_store_simt_(1, 32)
+DO__atomic_store_simt_(2, 16)
+DO__atomic_store_simt_(4, 32)
+DO__atomic_store_simt_(8, 64)
+
+template <class type>
+void __device__ __atomic_store_n_simt_(type *ptr, type val, int memorder) {
+  __atomic_store_simt_(ptr, &val, memorder);
+}
+
+#define DO__atomic_compare_exchange_simt_(bytes, bits)                    \
+  template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0> \
+  bool __device__ __atomic_compare_exchange_simt_(                        \
+      type *ptr, type *expected, const type *desired, bool,               \
+      int success_memorder, int failure_memorder) {                       \
+    int##bits##_t tmp = 0, old = 0, old_tmp;                              \
+    memcpy(&tmp, desired, bytes);                                         \
+    memcpy(&old, expected, bytes);                                        \
+    old_tmp = old;                                                        \
+    switch (__stronger_order_simt_(success_memorder, failure_memorder)) { \
+      case __ATOMIC_SEQ_CST: __simt_fence_sc_();                          \
+      case __ATOMIC_CONSUME:                                              \
+      case __ATOMIC_ACQUIRE:                                              \
+        __simt_cas_acquire_##bits(ptr, old, old_tmp, tmp);                \
+        break;                                                            \
+      case __ATOMIC_ACQ_REL:                                              \
+        __simt_cas_acq_rel_##bits(ptr, old, old_tmp, tmp);                \
+        break;                                                            \
+      case __ATOMIC_RELEASE:                                              \
+        __simt_cas_release_##bits(ptr, old, old_tmp, tmp);                \
+        break;                                                            \
+      case __ATOMIC_RELAXED:                                              \
+        __simt_cas_relaxed_##bits(ptr, old, old_tmp, tmp);                \
+        break;                                                            \
+      default: assert(0);                                                 \
+    }                                                                     \
+    bool const ret = old == old_tmp;                                      \
+    memcpy(expected, &old, bytes);                                        \
+    return ret;                                                           \
+  }
+DO__atomic_compare_exchange_simt_(4, 32)
+DO__atomic_compare_exchange_simt_(8, 64)
+
+template <class type, std::enable_if_t<sizeof(type) <= 2, int> = 0>
+bool __device__ __atomic_compare_exchange_simt_(type *ptr, type *expected,
+                                                const type *desired, bool,
+                                                int success_memorder,
+                                                int failure_memorder) {
+  using R = std::conditional_t<std::is_volatile<type>::value, volatile uint32_t,
+                               uint32_t>;
+  auto const aligned = (R *)((intptr_t)ptr & ~(sizeof(uint32_t) - 1));
+  auto const offset  = uint32_t((intptr_t)ptr & (sizeof(uint32_t) - 1)) * 8;
+  auto const mask    = ((1 << sizeof(type) * 8) - 1) << offset;
+
+  uint32_t old = *expected << offset, old_value;
+  while (1) {
+    old_value = (old & mask) >> offset;
+    if (old_value != *expected) break;
+    uint32_t const attempt = (old & ~mask) | (*desired << offset);
+    if (__atomic_compare_exchange_simt_(aligned, &old, &attempt, true,
+                                        success_memorder, failure_memorder))
+      return true;
+  }
+  *expected = old_value;
+  return false;
+}
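
The overload above emulates 1- and 2-byte compare-exchange on the containing aligned 32-bit word: it computes the word address, the bit offset of the sub-word, and a mask, then loops a 32-bit CAS that replaces only the masked bits. The address math, spelled out as a sketch for a hypothetical 16-bit object in the upper half of its word:

    #include <cstdint>

    inline void subword_cas_math() {
      alignas(uint32_t) uint16_t words[2] = {0, 0};
      uint16_t *p     = &words[1];  // byte offset 2 inside the aligned word
      auto *aligned   = (uint32_t *)((intptr_t)p & ~(sizeof(uint32_t) - 1));
      uint32_t offset = uint32_t((intptr_t)p & (sizeof(uint32_t) - 1)) * 8;
      uint32_t mask   = ((1u << 16) - 1u) << offset;  // 0xFFFF0000
      (void)aligned;
      (void)mask;  // the CAS loop swaps only these bits
    }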
+
+template <class type>
+bool __device__ __atomic_compare_exchange_n_simt_(type *ptr, type *expected,
+                                                  type desired, bool weak,
+                                                  int success_memorder,
+                                                  int failure_memorder) {
+  return __atomic_compare_exchange_simt_(ptr, expected, &desired, weak,
+                                         success_memorder, failure_memorder);
+}
+
+#define DO__atomic_exchange_simt_(bytes, bits)                                 \
+  template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0>      \
+  void __device__ __atomic_exchange_simt_(type *ptr, type *val, type *ret,     \
+                                          int memorder) {                      \
+    int##bits##_t tmp = 0;                                                     \
+    memcpy(&tmp, val, bytes);                                                  \
+    switch (memorder) {                                                        \
+      case __ATOMIC_SEQ_CST: __simt_fence_sc_();                               \
+      case __ATOMIC_CONSUME:                                                   \
+      case __ATOMIC_ACQUIRE: __simt_exch_acquire_##bits(ptr, tmp, tmp); break; \
+      case __ATOMIC_ACQ_REL: __simt_exch_acq_rel_##bits(ptr, tmp, tmp); break; \
+      case __ATOMIC_RELEASE: __simt_exch_release_##bits(ptr, tmp, tmp); break; \
+      case __ATOMIC_RELAXED: __simt_exch_relaxed_##bits(ptr, tmp, tmp); break; \
+      default: assert(0);                                                      \
+    }                                                                          \
+    memcpy(ret, &tmp, bytes);                                                  \
+  }
+DO__atomic_exchange_simt_(4, 32)
+DO__atomic_exchange_simt_(8, 64)
+
+template <class type, std::enable_if_t<sizeof(type) <= 2, int> = 0>
+void __device__ __atomic_exchange_simt_(type *ptr, type *val, type *ret,
+                                        int memorder) {
+  type expected = __atomic_load_n_simt_(ptr, __ATOMIC_RELAXED);
+  while (!__atomic_compare_exchange_simt_(ptr, &expected, val, true, memorder,
+                                          memorder))
+    ;
+  *ret = expected;
+}
+
+template <class type>
+type __device__ __atomic_exchange_n_simt_(type *ptr, type val, int memorder) {
+  type ret;
+  __atomic_exchange_simt_(ptr, &val, &ret, memorder);
+  return ret;
+}
+
+#define DO__atomic_fetch_add_simt_(bytes, bits)                               \
+  template <class type, class delta,                                          \
+            std::enable_if_t<sizeof(type) == bytes, int> = 0>                 \
+  type __device__ __atomic_fetch_add_simt_(type *ptr, delta val,              \
+                                           int memorder) {                    \
+    type ret;                                                                 \
+    switch (memorder) {                                                       \
+      case __ATOMIC_SEQ_CST: __simt_fence_sc_();                              \
+      case __ATOMIC_CONSUME:                                                  \
+      case __ATOMIC_ACQUIRE: __simt_add_acquire_##bits(ptr, ret, val); break; \
+      case __ATOMIC_ACQ_REL: __simt_add_acq_rel_##bits(ptr, ret, val); break; \
+      case __ATOMIC_RELEASE: __simt_add_release_##bits(ptr, ret, val); break; \
+      case __ATOMIC_RELAXED: __simt_add_relaxed_##bits(ptr, ret, val); break; \
+      default: assert(0);                                                     \
+    }                                                                         \
+    return ret;                                                               \
+  }
+DO__atomic_fetch_add_simt_(4, 32)
+DO__atomic_fetch_add_simt_(8, 64)
+
+template <class type, class delta,
+          std::enable_if_t<sizeof(type) <= 2, int> = 0>
+type __device__ __atomic_fetch_add_simt_(type *ptr, delta val, int memorder) {
+  type expected = __atomic_load_n_simt_(ptr, __ATOMIC_RELAXED);
+  type desired  = expected + val;
+  // Recompute `desired` from the refreshed `expected` after a failed CAS so
+  // the stored result is never based on a stale value.
+  while (!__atomic_compare_exchange_simt_(ptr, &expected, &desired, true,
+                                          memorder, memorder))
+    desired = expected + val;
+  return expected;
+}
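
Sub-word fetch-ops have no native PTX instruction here, so they are built as a relaxed load plus a CAS retry loop; the same pattern repeats for sub/and/xor/or below. A usage sketch (assumes a device context):

    // Atomically increment a 16-bit counter; sizeof(uint16_t) <= 2, so this
    // dispatches to the CAS-loop fallback above. Returns the old value.
    __device__ uint16_t bump(uint16_t *counter) {
      return Kokkos::Impl::__atomic_fetch_add_simt_(counter, uint16_t(1),
                                                    __ATOMIC_RELAXED);
    }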
+
+#define DO__atomic_fetch_sub_simt_(bytes, bits)                                \
+  template <class type, class delta,                                           \
+            std::enable_if_t<sizeof(type) == bytes, int> = 0>                  \
+  type __device__ __atomic_fetch_sub_simt_(type *ptr, delta val,               \
+                                           int memorder) {                     \
+    type ret;                                                                  \
+    switch (memorder) {                                                        \
+      case __ATOMIC_SEQ_CST: __simt_fence_sc_();                               \
+      case __ATOMIC_CONSUME:                                                   \
+      case __ATOMIC_ACQUIRE: __simt_add_acquire_##bits(ptr, ret, -val); break; \
+      case __ATOMIC_ACQ_REL: __simt_add_acq_rel_##bits(ptr, ret, -val); break; \
+      case __ATOMIC_RELEASE: __simt_add_release_##bits(ptr, ret, -val); break; \
+      case __ATOMIC_RELAXED: __simt_add_relaxed_##bits(ptr, ret, -val); break; \
+      default: assert(0);                                                      \
+    }                                                                          \
+    return ret;                                                                \
+  }
+DO__atomic_fetch_sub_simt_(4, 32)
+DO__atomic_fetch_sub_simt_(8, 64)
+
+template <class type, class delta,
+          std::enable_if_t<sizeof(type) <= 2, int> = 0>
+type __device__ __atomic_fetch_sub_simt_(type *ptr, delta val, int memorder) {
+  type expected = __atomic_load_n_simt_(ptr, __ATOMIC_RELAXED);
+  type desired  = expected - val;
+  // Recompute from the refreshed `expected` after each failed CAS.
+  while (!__atomic_compare_exchange_simt_(ptr, &expected, &desired, true,
+                                          memorder, memorder))
+    desired = expected - val;
+  return expected;
+}
+
+#define DO__atomic_fetch_and_simt_(bytes, bits)                               \
+  template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0>     \
+  type __device__ __atomic_fetch_and_simt_(type *ptr, type val,               \
+                                           int memorder) {                    \
+    type ret;                                                                 \
+    switch (memorder) {                                                       \
+      case __ATOMIC_SEQ_CST: __simt_fence_sc_();                              \
+      case __ATOMIC_CONSUME:                                                  \
+      case __ATOMIC_ACQUIRE: __simt_and_acquire_##bits(ptr, ret, val); break; \
+      case __ATOMIC_ACQ_REL: __simt_and_acq_rel_##bits(ptr, ret, val); break; \
+      case __ATOMIC_RELEASE: __simt_and_release_##bits(ptr, ret, val); break; \
+      case __ATOMIC_RELAXED: __simt_and_relaxed_##bits(ptr, ret, val); break; \
+      default: assert(0);                                                     \
+    }                                                                         \
+    return ret;                                                               \
+  }
+DO__atomic_fetch_and_simt_(4, 32)
+DO__atomic_fetch_and_simt_(8, 64)
+
+template <class type, class delta,
+          std::enable_if_t<sizeof(type) <= 2, int> = 0>
+type __device__ __atomic_fetch_and_simt_(type *ptr, delta val, int memorder) {
+  type expected = __atomic_load_n_simt_(ptr, __ATOMIC_RELAXED);
+  type desired  = expected & val;
+  // Recompute from the refreshed `expected` after each failed CAS.
+  while (!__atomic_compare_exchange_simt_(ptr, &expected, &desired, true,
+                                          memorder, memorder))
+    desired = expected & val;
+  return expected;
+}
+
+#define DO__atomic_fetch_xor_simt_(bytes, bits)                               \
+  template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0>     \
+  type __device__ __atomic_fetch_xor_simt_(type *ptr, type val,               \
+                                           int memorder) {                    \
+    type ret;                                                                 \
+    switch (memorder) {                                                       \
+      case __ATOMIC_SEQ_CST: __simt_fence_sc_();                              \
+      case __ATOMIC_CONSUME:                                                  \
+      case __ATOMIC_ACQUIRE: __simt_xor_acquire_##bits(ptr, ret, val); break; \
+      case __ATOMIC_ACQ_REL: __simt_xor_acq_rel_##bits(ptr, ret, val); break; \
+      case __ATOMIC_RELEASE: __simt_xor_release_##bits(ptr, ret, val); break; \
+      case __ATOMIC_RELAXED: __simt_xor_relaxed_##bits(ptr, ret, val); break; \
+      default: assert(0);                                                     \
+    }                                                                         \
+    return ret;                                                               \
+  }
+DO__atomic_fetch_xor_simt_(4, 32)
+DO__atomic_fetch_xor_simt_(8, 64)
+
+template <class type, class delta,
+          std::enable_if_t<sizeof(type) <= 2, int> = 0>
+type __device__ __atomic_fetch_xor_simt_(type *ptr, delta val, int memorder) {
+  type expected = __atomic_load_n_simt_(ptr, __ATOMIC_RELAXED);
+  type desired  = expected ^ val;
+  // Recompute from the refreshed `expected` after each failed CAS.
+  while (!__atomic_compare_exchange_simt_(ptr, &expected, &desired, true,
+                                          memorder, memorder))
+    desired = expected ^ val;
+  return expected;
+}
+
+#define DO__atomic_fetch_or_simt_(bytes, bits)                                 \
+  template <class type, std::enable_if_t<sizeof(type) == bytes, int> = 0>      \
+  type __device__ __atomic_fetch_or_simt_(type *ptr, type val, int memorder) { \
+    type ret;                                                                  \
+    switch (memorder) {                                                        \
+      case __ATOMIC_SEQ_CST: __simt_fence_sc_();                               \
+      case __ATOMIC_CONSUME:                                                   \
+      case __ATOMIC_ACQUIRE: __simt_or_acquire_##bits(ptr, ret, val); break;   \
+      case __ATOMIC_ACQ_REL: __simt_or_acq_rel_##bits(ptr, ret, val); break;   \
+      case __ATOMIC_RELEASE: __simt_or_release_##bits(ptr, ret, val); break;   \
+      case __ATOMIC_RELAXED: __simt_or_relaxed_##bits(ptr, ret, val); break;   \
+      default: assert(0);                                                      \
+    }                                                                          \
+    return ret;                                                                \
+  }
+DO__atomic_fetch_or_simt_(4, 32)
+DO__atomic_fetch_or_simt_(8, 64)
+
+template <class type, class delta,
+          std::enable_if_t<sizeof(type) <= 2, int> = 0>
+type __device__ __atomic_fetch_or_simt_(type *ptr, delta val, int memorder) {
+  type expected = __atomic_load_n_simt_(ptr, __ATOMIC_RELAXED);
+  type desired  = expected | val;
+  // Recompute from the refreshed `expected` after each failed CAS.
+  while (!__atomic_compare_exchange_simt_(ptr, &expected, &desired, true,
+                                          memorder, memorder))
+    desired = expected | val;
+  return expected;
+}
+
+template <class type>
+inline bool __device__ __atomic_test_and_set_simt_(type *ptr, int memorder) {
+  return __atomic_exchange_n_simt_((char *)ptr, (char)1, memorder) == 1;
+}
+template <class type>
+inline void __device__ __atomic_clear_simt_(type *ptr, int memorder) {
+  return __atomic_store_n_simt_((char *)ptr, (char)0, memorder);
+}
+
+inline constexpr __device__ bool __atomic_always_lock_free_simt_(size_t size,
+                                                                 void *) {
+  return size <= 8;
+}
+inline __device__ bool __atomic_is_lock_free_simt_(size_t size, void *ptr) {
+  return __atomic_always_lock_free_simt_(size, ptr);
+}
+
+/*
+    fences
+*/
+
+inline void __device__ __atomic_thread_fence_simt(int memorder) {
+  switch (memorder) {
+    case __ATOMIC_SEQ_CST: __simt_fence_sc_(); break;
+    case __ATOMIC_CONSUME:
+    case __ATOMIC_ACQUIRE:
+    case __ATOMIC_ACQ_REL:
+    case __ATOMIC_RELEASE: __simt_fence_(); break;
+    case __ATOMIC_RELAXED: break;
+    default: assert(0);
+  }
+}
+inline void __device__ __atomic_signal_fence_simt(int memorder) {
+  __atomic_thread_fence_simt(memorder);
+}
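
Every non-relaxed order short of seq_cst maps to the same device-scope fence; only __ATOMIC_SEQ_CST gets the stronger __simt_fence_sc_(). A release-publication sketch using the pieces defined above:

    // Publish data, then set a flag; the fence orders the two stores.
    __device__ void publish(int *data, int *flag) {
      *data = 42;
      Kokkos::Impl::__atomic_thread_fence_simt(__ATOMIC_RELEASE);
      Kokkos::Impl::__atomic_store_n_simt_(flag, 1, __ATOMIC_RELAXED);
    }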
+
+/*
+    non-volatile
+*/
+
+template <class type>
+type __device__ __atomic_load_n_simt(const type *ptr, int memorder) {
+  return __atomic_load_n_simt_(const_cast<const type *>(ptr), memorder);
+}
+template <class type>
+void __device__ __atomic_load_simt(const type *ptr, type *ret, int memorder) {
+  __atomic_load_simt_(const_cast<const type *>(ptr), ret, memorder);
+}
+template <class type>
+void __device__ __atomic_store_n_simt(type *ptr, type val, int memorder) {
+  __atomic_store_n_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+void __device__ __atomic_store_simt(type *ptr, type *val, int memorder) {
+  __atomic_store_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+type __device__ __atomic_exchange_n_simt(type *ptr, type val, int memorder) {
+  return __atomic_exchange_n_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+void __device__ __atomic_exchange_simt(type *ptr, type *val, type *ret,
+                                       int memorder) {
+  __atomic_exchange_simt_(const_cast<type *>(ptr), val, ret, memorder);
+}
+template <class type>
+bool __device__ __atomic_compare_exchange_n_simt(type *ptr, type *expected,
+                                                 type desired, bool weak,
+                                                 int success_memorder,
+                                                 int failure_memorder) {
+  return __atomic_compare_exchange_n_simt_(const_cast<type *>(ptr), expected,
+                                           desired, weak, success_memorder,
+                                           failure_memorder);
+}
+template <class type>
+bool __device__ __atomic_compare_exchange_simt(type *ptr, type *expected,
+                                               type *desired, bool weak,
+                                               int success_memorder,
+                                               int failure_memorder) {
+  return __atomic_compare_exchange_simt_(const_cast<type *>(ptr), expected,
+                                         desired, weak, success_memorder,
+                                         failure_memorder);
+}
+template <class type, class delta>
+type __device__ __atomic_fetch_add_simt(type *ptr, delta val, int memorder) {
+  return __atomic_fetch_add_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type, class delta>
+type __device__ __atomic_fetch_sub_simt(type *ptr, delta val, int memorder) {
+  return __atomic_fetch_sub_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+type __device__ __atomic_fetch_and_simt(type *ptr, type val, int memorder) {
+  return __atomic_fetch_and_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+type __device__ __atomic_fetch_xor_simt(type *ptr, type val, int memorder) {
+  return __atomic_fetch_xor_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+type __device__ __atomic_fetch_or_simt(type *ptr, type val, int memorder) {
+  return __atomic_fetch_or_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+bool __device__ __atomic_test_and_set_simt(void *ptr, int memorder) {
+  return __atomic_test_and_set_simt_(const_cast<void *>(ptr), memorder);
+}
+template <class type>
+void __device__ __atomic_clear_simt(void *ptr, int memorder) {
+  return __atomic_clear_simt_(const_cast<void *>(ptr), memorder);
+}
+inline bool __device__ __atomic_always_lock_free_simt(size_t size, void *ptr) {
+  return __atomic_always_lock_free_simt_(size, const_cast<void *>(ptr));
+}
+inline bool __device__ __atomic_is_lock_free_simt(size_t size, void *ptr) {
+  return __atomic_is_lock_free_simt_(size, const_cast<void *>(ptr));
+}
+
+/*
+    volatile
+*/
+
+template <class type>
+type __device__ __atomic_load_n_simt(const volatile type *ptr, int memorder) {
+  return __atomic_load_n_simt_(const_cast<const type *>(ptr), memorder);
+}
+template <class type>
+void __device__ __atomic_load_simt(const volatile type *ptr, type *ret,
+                                   int memorder) {
+  __atomic_load_simt_(const_cast<const type *>(ptr), ret, memorder);
+}
+template <class type>
+void __device__ __atomic_store_n_simt(volatile type *ptr, type val,
+                                      int memorder) {
+  __atomic_store_n_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+void __device__ __atomic_store_simt(volatile type *ptr, type *val,
+                                    int memorder) {
+  __atomic_store_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+type __device__ __atomic_exchange_n_simt(volatile type *ptr, type val,
+                                         int memorder) {
+  return __atomic_exchange_n_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+void __device__ __atomic_exchange_simt(volatile type *ptr, type *val, type *ret,
+                                       int memorder) {
+  __atomic_exchange_simt_(const_cast<type *>(ptr), val, ret, memorder);
+}
+template <class type>
+bool __device__ __atomic_compare_exchange_n_simt(volatile type *ptr,
+                                                 type *expected, type desired,
+                                                 bool weak,
+                                                 int success_memorder,
+                                                 int failure_memorder) {
+  return __atomic_compare_exchange_n_simt_(const_cast<type *>(ptr), expected,
+                                           desired, weak, success_memorder,
+                                           failure_memorder);
+}
+template <class type>
+bool __device__ __atomic_compare_exchange_simt(volatile type *ptr,
+                                               type *expected, type *desired,
+                                               bool weak, int success_memorder,
+                                               int failure_memorder) {
+  return __atomic_compare_exchange_simt_(const_cast<type *>(ptr), expected,
+                                         desired, weak, success_memorder,
+                                         failure_memorder);
+}
+template <class type, class delta>
+type __device__ __atomic_fetch_add_simt(volatile type *ptr, delta val,
+                                        int memorder) {
+  return __atomic_fetch_add_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type, class delta>
+type __device__ __atomic_fetch_sub_simt(volatile type *ptr, delta val,
+                                        int memorder) {
+  return __atomic_fetch_sub_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+type __device__ __atomic_fetch_and_simt(volatile type *ptr, type val,
+                                        int memorder) {
+  return __atomic_fetch_and_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+type __device__ __atomic_fetch_xor_simt(volatile type *ptr, type val,
+                                        int memorder) {
+  return __atomic_fetch_xor_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+type __device__ __atomic_fetch_or_simt(volatile type *ptr, type val,
+                                       int memorder) {
+  return __atomic_fetch_or_simt_(const_cast<type *>(ptr), val, memorder);
+}
+template <class type>
+bool __device__ __atomic_test_and_set_simt(volatile void *ptr, int memorder) {
+  return __atomic_test_and_set_simt_(const_cast<void *>(ptr), memorder);
+}
+template <class type>
+void __device__ __atomic_clear_simt(volatile void *ptr, int memorder) {
+  return __atomic_clear_simt_(const_cast<void *>(ptr), memorder);
+}
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  //_SIMT_DETAILS_CONFIG
+
+#ifndef KOKKOS_SIMT_ATOMIC_BUILTIN_REPLACEMENTS_DEFINED
+/*
+    builtins
+*/
+
+#define __atomic_load_n __atomic_load_n_simt
+#define __atomic_load __atomic_load_simt
+#define __atomic_store_n __atomic_store_n_simt
+#define __atomic_store __atomic_store_simt
+#define __atomic_exchange_n __atomic_exchange_n_simt
+#define __atomic_exchange __atomic_exchange_simt
+#define __atomic_compare_exchange_n __atomic_compare_exchange_n_simt
+#define __atomic_compare_exchange __atomic_compare_exchange_simt
+#define __atomic_fetch_add __atomic_fetch_add_simt
+#define __atomic_fetch_sub __atomic_fetch_sub_simt
+#define __atomic_fetch_and __atomic_fetch_and_simt
+#define __atomic_fetch_xor __atomic_fetch_xor_simt
+#define __atomic_fetch_or __atomic_fetch_or_simt
+#define __atomic_test_and_set __atomic_test_and_set_simt
+#define __atomic_clear __atomic_clear_simt
+#define __atomic_always_lock_free __atomic_always_lock_free_simt
+#define __atomic_is_lock_free __atomic_is_lock_free_simt
+#define __atomic_thread_fence __atomic_thread_fence_simt
+#define __atomic_signal_fence __atomic_signal_fence_simt
+
+#define KOKKOS_SIMT_ATOMIC_BUILTIN_REPLACEMENTS_DEFINED
+
+#endif  // KOKKOS_SIMT_ATOMIC_BUILTIN_REPLACEMENTS_DEFINED
+#endif  //__CUDA_ARCH__ && KOKKOS_ENABLE_CUDA_ASM_ATOMICS
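
Defining the compiler's __atomic_* builtins as macros reroutes every subsequent builtin call to the _simt implementations. The companion header introduced next in this commit undoes the mapping, which suggests an include sandwich; a sketch (the first header's name is inferred from the restore header's name):

    #include <Cuda/Kokkos_Cuda_Atomic_Intrinsics.hpp>  // __atomic_* -> *_simt
    // ... device code that calls __atomic_fetch_add, __atomic_load_n, ...
    #include <Cuda/Kokkos_Cuda_Atomic_Intrinsics_Restore_Builtins.hpp>  // undo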
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Atomic_Intrinsics_Restore_Builtins.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Atomic_Intrinsics_Restore_Builtins.hpp
new file mode 100644 (file)
index 0000000..d7cd1ba
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//              Copyright (2019) Sandia Corporation
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifdef KOKKOS_SIMT_ATOMIC_BUILTIN_REPLACEMENTS_DEFINED
+
+#undef __atomic_load_n
+#undef __atomic_load
+#undef __atomic_store_n
+#undef __atomic_store
+#undef __atomic_exchange_n
+#undef __atomic_exchange
+#undef __atomic_compare_exchange_n
+#undef __atomic_compare_exchange
+#undef __atomic_fetch_add
+#undef __atomic_fetch_sub
+#undef __atomic_fetch_and
+#undef __atomic_fetch_xor
+#undef __atomic_fetch_or
+#undef __atomic_test_and_set
+#undef __atomic_clear
+#undef __atomic_always_lock_free
+#undef __atomic_is_lock_free
+#undef __atomic_thread_fence
+#undef __atomic_signal_fence
+
+#undef KOKKOS_SIMT_ATOMIC_BUILTIN_REPLACEMENTS_DEFINED
+
+#endif  // KOKKOS_SIMT_ATOMIC_BUILTIN_REPLACEMENTS_DEFINED
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp
new file mode 100644 (file)
index 0000000..993c8d1
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_INTERNAL_HPP
+#define KOKKOS_CUDA_INTERNAL_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+
+#include <Cuda/Kokkos_Cuda_Error.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+inline int cuda_max_active_blocks_per_sm(cudaDeviceProp const& properties,
+                                         cudaFuncAttributes const& attributes,
+                                         int block_size, size_t dynamic_shmem) {
+  // Limits due to registers/SM
+  int const regs_per_sm     = properties.regsPerMultiprocessor;
+  int const regs_per_thread = attributes.numRegs;
+  int const max_blocks_regs = regs_per_sm / (regs_per_thread * block_size);
+
+  // Limits due to shared memory/SM
+  size_t const shmem_per_sm            = properties.sharedMemPerMultiprocessor;
+  size_t const shmem_per_block         = properties.sharedMemPerBlock;
+  size_t const static_shmem            = attributes.sharedSizeBytes;
+  size_t const dynamic_shmem_per_block = attributes.maxDynamicSharedSizeBytes;
+  size_t const total_shmem             = static_shmem + dynamic_shmem;
+
+  int const max_blocks_shmem =
+      total_shmem > shmem_per_block || dynamic_shmem > dynamic_shmem_per_block
+          ? 0
+          : (total_shmem > 0 ? (int)shmem_per_sm / total_shmem
+                             : max_blocks_regs);
+
+  // Limits due to blocks/SM
+#if CUDA_VERSION >= 11000
+  int const max_blocks_per_sm = properties.maxBlocksPerMultiProcessor;
+#else
+  int const max_blocks_per_sm = [&properties]() {
+    switch (properties.major) {
+      case 3: return 16;
+      case 5:
+      case 6: return 32;
+      case 7: {
+        int isTuring = properties.minor == 5;
+        return (isTuring) ? 16 : 32;
+      }
+      default:
+        throw_runtime_exception("Unknown device in cuda block size deduction");
+        return 0;
+    }
+  }();
+#endif
+
+  // Overall occupancy in blocks
+  return std::min({max_blocks_regs, max_blocks_shmem, max_blocks_per_sm});
+}
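
For concreteness (editorial, hypothetical numbers): with 65536 registers per SM, a kernel using 32 registers per thread, and block_size = 256, max_blocks_regs = 65536 / (32 * 256) = 8; the final occupancy is the minimum of that, the shared-memory limit, and the architecture's blocks-per-SM cap.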
+
+template <typename UnaryFunction, typename LaunchBounds>
+inline int cuda_deduce_block_size(bool early_termination,
+                                  cudaDeviceProp const& properties,
+                                  cudaFuncAttributes const& attributes,
+                                  UnaryFunction block_size_to_dynamic_shmem,
+                                  LaunchBounds) {
+  // Limits
+  int const max_threads_per_sm = properties.maxThreadsPerMultiProcessor;
+  // Unclear whether the cap below is needed or already accounted for in
+  // the functor attributes.
+  int const max_threads_per_block =
+      std::min(LaunchBounds::maxTperB == 0 ? (int)properties.maxThreadsPerBlock
+                                           : (int)LaunchBounds::maxTperB,
+               attributes.maxThreadsPerBlock);
+  int const min_blocks_per_sm =
+      LaunchBounds::minBperSM == 0 ? 1 : LaunchBounds::minBperSM;
+
+  // Recorded maximum
+  int opt_block_size     = 0;
+  int opt_threads_per_sm = 0;
+
+  for (int block_size = max_threads_per_block; block_size > 0;
+       block_size -= 32) {
+    size_t const dynamic_shmem = block_size_to_dynamic_shmem(block_size);
+
+    int blocks_per_sm = cuda_max_active_blocks_per_sm(
+        properties, attributes, block_size, dynamic_shmem);
+
+    int threads_per_sm = blocks_per_sm * block_size;
+
+    if (threads_per_sm > max_threads_per_sm) {
+      blocks_per_sm  = max_threads_per_sm / block_size;
+      threads_per_sm = blocks_per_sm * block_size;
+    }
+
+    if (blocks_per_sm >= min_blocks_per_sm) {
+      // The logic prefers smaller block sizes over larger ones to
+      // give more flexibility to the scheduler.
+      // But don't go below 128 where performance suffers significantly
+      // for simple copy/set kernels.
+      if ((threads_per_sm > opt_threads_per_sm) ||
+          ((block_size >= 128) && (threads_per_sm == opt_threads_per_sm))) {
+        opt_block_size     = block_size;
+        opt_threads_per_sm = threads_per_sm;
+      }
+    }
+
+    if (early_termination && opt_block_size != 0) break;
+  }
+
+  return opt_block_size;
+}
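
A usage sketch of the deduction helper (the kernel and its per-thread dynamic shared-memory need are hypothetical; the CUDA runtime queries are the standard ones):

    __global__ void my_kernel(int *);  // hypothetical kernel

    inline int pick_block_size() {
      cudaDeviceProp props;
      cudaGetDeviceProperties(&props, /*device=*/0);
      cudaFuncAttributes attr;
      cudaFuncGetAttributes(&attr, my_kernel);
      // Assume 16 bytes of dynamic shared memory per thread.
      auto shmem = [](int block_size) { return size_t(block_size) * 16; };
      return Kokkos::Impl::cuda_deduce_block_size(
          /*early_termination=*/true, props, attr, shmem,
          Kokkos::LaunchBounds<>{});
    }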
+
+template <class FunctorType, class LaunchBounds>
+int cuda_get_max_block_size(const CudaInternal* cuda_instance,
+                            const cudaFuncAttributes& attr,
+                            const FunctorType& f, const size_t vector_length,
+                            const size_t shmem_block,
+                            const size_t shmem_thread) {
+  (void)cuda_instance;
+
+  auto const& prop = Kokkos::Cuda().cuda_device_prop();
+
+  auto const block_size_to_dynamic_shmem = [&f, vector_length, shmem_block,
+                                            shmem_thread](int block_size) {
+    size_t const functor_shmem =
+        Kokkos::Impl::FunctorTeamShmemSize<FunctorType>::value(
+            f, block_size / vector_length);
+
+    size_t const dynamic_shmem = shmem_block +
+                                 shmem_thread * (block_size / vector_length) +
+                                 functor_shmem;
+    return dynamic_shmem;
+  };
+
+  return cuda_deduce_block_size(true, prop, attr, block_size_to_dynamic_shmem,
+                                LaunchBounds{});
+}
+
+template <class FunctorType, class LaunchBounds>
+int cuda_get_opt_block_size(const CudaInternal* cuda_instance,
+                            const cudaFuncAttributes& attr,
+                            const FunctorType& f, const size_t vector_length,
+                            const size_t shmem_block,
+                            const size_t shmem_thread) {
+  (void)cuda_instance;
+
+  auto const& prop = Kokkos::Cuda().cuda_device_prop();
+
+  auto const block_size_to_dynamic_shmem = [&f, vector_length, shmem_block,
+                                            shmem_thread](int block_size) {
+    size_t const functor_shmem =
+        Kokkos::Impl::FunctorTeamShmemSize<FunctorType>::value(
+            f, block_size / vector_length);
+
+    size_t const dynamic_shmem = shmem_block +
+                                 shmem_thread * (block_size / vector_length) +
+                                 functor_shmem;
+    return dynamic_shmem;
+  };
+
+  return cuda_deduce_block_size(false, prop, attr, block_size_to_dynamic_shmem,
+                                LaunchBounds{});
+}
+
+// Assuming cudaFuncSetCacheConfig(MyKernel, cudaFuncCachePreferL1)
+// NOTE these numbers can be obtained in several ways:
+// * One option is to download the CUDA Occupancy Calculator spreadsheet,
+// select the "Compute Capability" first, and check what the smallest
+// available "Shared Memory Size Config" is.  The "Shared Memory Per
+// Multiprocessor" value in bytes is then found below in the summary.
+// * Another option is to look up the information in the "Tuning Guide(s)"
+// of the CUDA Toolkit Documentation for each GPU architecture, in the
+// "Shared Memory" section (more tedious).
+inline size_t get_shmem_per_sm_prefer_l1(cudaDeviceProp const& properties) {
+  int const compute_capability = properties.major * 10 + properties.minor;
+  return [compute_capability]() {
+    switch (compute_capability) {
+      case 30:
+      case 32:
+      case 35: return 16;
+      case 37: return 80;
+      case 50:
+      case 53:
+      case 60:
+      case 62: return 64;
+      case 52:
+      case 61: return 96;
+      case 70:
+      case 80:
+      case 86: return 8;
+      case 75: return 32;
+      default:
+        Kokkos::Impl::throw_runtime_exception(
+            "Unknown device in cuda block size deduction");
+    }
+    return 0;
+  }() * 1024;
+}
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_ENABLE_CUDA
+#endif  /* #ifndef KOKKOS_CUDA_INTERNAL_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Error.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Error.hpp
new file mode 100644 (file)
index 0000000..e28e964
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_ERROR_HPP
+#define KOKKOS_CUDA_ERROR_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+#include <iosfwd>
+
+namespace Kokkos {
+namespace Impl {
+
+void cuda_stream_synchronize(
+    const cudaStream_t stream,
+    Kokkos::Tools::Experimental::SpecialSynchronizationCases reason,
+    const std::string& name);
+void cuda_device_synchronize(const std::string& name);
+void cuda_stream_synchronize(const cudaStream_t stream,
+                             const std::string& name);
+
+[[noreturn]] void cuda_internal_error_throw(cudaError e, const char* name,
+                                            const char* file = nullptr,
+                                            const int line   = 0);
+
+#ifndef KOKKOS_COMPILER_NVHPC
+[[noreturn]]
+#endif
+             void cuda_internal_error_abort(cudaError e, const char* name,
+                                            const char* file = nullptr,
+                                            const int line   = 0);
+
+inline void cuda_internal_safe_call(cudaError e, const char* name,
+                                    const char* file = nullptr,
+                                    const int line   = 0) {
+  // 1. Success -> normal continuation.
+  // 2. Error codes for which, to continue using CUDA, the process must be
+  //    terminated and relaunched -> call abort on the host-side.
+  // 3. Any other error code -> throw a runtime error.
+  switch (e) {
+    case cudaSuccess: break;
+    case cudaErrorIllegalAddress:
+    case cudaErrorAssert:
+    case cudaErrorHardwareStackError:
+    case cudaErrorIllegalInstruction:
+    case cudaErrorMisalignedAddress:
+    case cudaErrorInvalidAddressSpace:
+    case cudaErrorInvalidPc:
+    case cudaErrorLaunchFailure:
+      cuda_internal_error_abort(e, name, file, line);
+      break;
+    default: cuda_internal_error_throw(e, name, file, line); break;
+  }
+}
+
+#define KOKKOS_IMPL_CUDA_SAFE_CALL(call) \
+  Kokkos::Impl::cuda_internal_safe_call(call, #call, __FILE__, __LINE__)
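
Typical use of the macro (a sketch; cudaMalloc is just a representative runtime call): fatal device-state errors abort with file/line context, and anything else becomes a C++ exception.

    void *device_buffer(size_t bytes) {
      void *ptr = nullptr;
      KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMalloc(&ptr, bytes));
      return ptr;
    }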
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+
+KOKKOS_DEPRECATED
+inline void cuda_internal_safe_call_deprecated(cudaError e, const char* name,
+                                               const char* file = nullptr,
+                                               const int line   = 0) {
+  cuda_internal_safe_call(e, name, file, line);
+}
+
+#define CUDA_SAFE_CALL(call)                                              \
+  Kokkos::Impl::cuda_internal_safe_call_deprecated(call, #call, __FILE__, \
+                                                   __LINE__)
+
+#endif
+
+}  // namespace Impl
+
+namespace Experimental {
+
+class CudaRawMemoryAllocationFailure : public RawMemoryAllocationFailure {
+ private:
+  using base_t = RawMemoryAllocationFailure;
+
+  cudaError_t m_error_code = cudaSuccess;
+
+  static FailureMode get_failure_mode(cudaError_t error_code) {
+    switch (error_code) {
+      case cudaErrorMemoryAllocation: return FailureMode::OutOfMemoryError;
+      case cudaErrorInvalidValue: return FailureMode::InvalidAllocationSize;
+      // TODO handle cudaErrorNotSupported for cudaMallocManaged
+      default: return FailureMode::Unknown;
+    }
+  }
+
+ public:
+  // using base_t::base_t;
+  // would trigger
+  //
+  // error: cannot determine the exception specification of the default
+  // constructor due to a circular dependency
+  //
+  // using NVCC 9.1 and gcc 7.4
+  CudaRawMemoryAllocationFailure(
+      size_t arg_attempted_size, size_t arg_attempted_alignment,
+      FailureMode arg_failure_mode = FailureMode::OutOfMemoryError,
+      AllocationMechanism arg_mechanism =
+          AllocationMechanism::StdMalloc) noexcept
+      : base_t(arg_attempted_size, arg_attempted_alignment, arg_failure_mode,
+               arg_mechanism) {}
+
+  CudaRawMemoryAllocationFailure(size_t arg_attempted_size,
+                                 cudaError_t arg_error_code,
+                                 AllocationMechanism arg_mechanism) noexcept
+      : base_t(arg_attempted_size, /* CudaSpace doesn't handle alignment? */ 1,
+               get_failure_mode(arg_error_code), arg_mechanism),
+        m_error_code(arg_error_code) {}
+
+  void append_additional_error_information(std::ostream& o) const override;
+};
+
+}  // end namespace Experimental
+
+}  // namespace Kokkos
+
+#endif  // KOKKOS_ENABLE_CUDA
+#endif  // KOKKOS_CUDA_ERROR_HPP
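
How the exception type surfaces to callers, as a sketch; it assumes the Kokkos allocation path throws the RawMemoryAllocationFailure base class and that print_error_message is available on it:

    #include <Kokkos_Core.hpp>
    #include <iostream>

    void *alloc_or_report(size_t bytes) {
      try {
        return Kokkos::kokkos_malloc<Kokkos::CudaSpace>("buffer", bytes);
      } catch (Kokkos::Experimental::RawMemoryAllocationFailure const &f) {
        f.print_error_message(std::cerr);  // includes the appended CUDA info
        return nullptr;
      }
    }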
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_GraphNodeKernel.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_GraphNodeKernel.hpp
new file mode 100644 (file)
index 0000000..d6fadd8
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_CUDA_GRAPHNODEKERNEL_IMPL_HPP
+#define KOKKOS_KOKKOS_CUDA_GRAPHNODEKERNEL_IMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#if defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_CUDA_ENABLE_GRAPHS)
+
+#include <Kokkos_Graph_fwd.hpp>
+
+#include <impl/Kokkos_GraphImpl.hpp>    // GraphAccess needs to be complete
+#include <impl/Kokkos_SharedAlloc.hpp>  // SharedAllocationRecord
+
+#include <Kokkos_Parallel.hpp>
+#include <Kokkos_Parallel_Reduce.hpp>
+#include <Kokkos_PointerOwnership.hpp>
+
+#include <Kokkos_Cuda.hpp>
+#include <cuda_runtime_api.h>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class PolicyType, class Functor, class PatternTag, class... Args>
+class GraphNodeKernelImpl<Kokkos::Cuda, PolicyType, Functor, PatternTag,
+                          Args...>
+    : public PatternImplSpecializationFromTag<PatternTag, Functor, PolicyType,
+                                              Args..., Kokkos::Cuda>::type {
+ private:
+  using base_t =
+      typename PatternImplSpecializationFromTag<PatternTag, Functor, PolicyType,
+                                                Args..., Kokkos::Cuda>::type;
+  using size_type = Kokkos::Cuda::size_type;
+  // These are really functioning as optional references, though the
+  // cudaGraph_t one may not need to be, since it's a pointer under the
+  // covers and we're not modifying it.
+  Kokkos::ObservingRawPtr<const cudaGraph_t> m_graph_ptr    = nullptr;
+  Kokkos::ObservingRawPtr<cudaGraphNode_t> m_graph_node_ptr = nullptr;
+  // Note: owned pointer to CudaSpace memory (used for global memory launches),
+  // which we're responsible for deallocating, but not responsible for calling
+  // its destructor.
+  using Record = Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>;
+  // Basically, we have to make this mutable for the same reasons that the
+  // global kernel buffers in the Cuda instance are mutable...
+  mutable Kokkos::OwningRawPtr<base_t> m_driver_storage = nullptr;
+
+ public:
+  using Policy       = PolicyType;
+  using graph_kernel = GraphNodeKernelImpl;
+
+  // TODO Ensure the execution space of the graph is the same as the one
+  //      attached to the policy?
+  // TODO @graph kernel name info propagation
+  template <class PolicyDeduced, class... ArgsDeduced>
+  GraphNodeKernelImpl(std::string, Kokkos::Cuda const&, Functor arg_functor,
+                      PolicyDeduced&& arg_policy, ArgsDeduced&&... args)
+      // This is ugly, but it works and is the most minimal change to the
+      // codebase for now...
+      : base_t(std::move(arg_functor), (PolicyDeduced &&) arg_policy,
+               (ArgsDeduced &&) args...) {}
+
+  // FIXME @graph Forward through the instance once that works in the backends
+  template <class PolicyDeduced>
+  GraphNodeKernelImpl(Kokkos::Cuda const& ex, Functor arg_functor,
+                      PolicyDeduced&& arg_policy)
+      : GraphNodeKernelImpl("", ex, std::move(arg_functor),
+                            (PolicyDeduced &&) arg_policy) {}
+
+  ~GraphNodeKernelImpl() {
+    if (m_driver_storage) {
+      // We should be the only owner, but this is still the easiest way to
+      // allocate and deallocate aligned memory for these sorts of things
+      Record::decrement(Record::get_record(m_driver_storage));
+    }
+  }
+
+  void set_cuda_graph_ptr(cudaGraph_t* arg_graph_ptr) {
+    m_graph_ptr = arg_graph_ptr;
+  }
+  void set_cuda_graph_node_ptr(cudaGraphNode_t* arg_node_ptr) {
+    m_graph_node_ptr = arg_node_ptr;
+  }
+  cudaGraphNode_t* get_cuda_graph_node_ptr() const { return m_graph_node_ptr; }
+  cudaGraph_t const* get_cuda_graph_ptr() const { return m_graph_ptr; }
+
+  Kokkos::ObservingRawPtr<base_t> allocate_driver_memory_buffer() const {
+    KOKKOS_EXPECTS(m_driver_storage == nullptr)
+
+    auto* record = Record::allocate(
+        Kokkos::CudaSpace{}, "GraphNodeKernel global memory functor storage",
+        sizeof(base_t));
+
+    Record::increment(record);
+    m_driver_storage = reinterpret_cast<base_t*>(record->data());
+    KOKKOS_ENSURES(m_driver_storage != nullptr)
+    return m_driver_storage;
+  }
+};
+
+struct CudaGraphNodeAggregateKernel {
+  using graph_kernel = CudaGraphNodeAggregateKernel;
+
+  // Aggregates don't need a policy, but we provide a trivial one so that the
+  // static assertions about graph kernels still pass.
+  struct Policy {
+    using is_graph_kernel = std::true_type;
+  };
+};
+
+template <class KernelType,
+          class Tag =
+              typename PatternTagFromImplSpecialization<KernelType>::type>
+struct get_graph_node_kernel_type
+    : identity<GraphNodeKernelImpl<Kokkos::Cuda, typename KernelType::Policy,
+                                   typename KernelType::functor_type, Tag>> {};
+template <class KernelType>
+struct get_graph_node_kernel_type<KernelType, Kokkos::ParallelReduceTag>
+    : identity<GraphNodeKernelImpl<Kokkos::Cuda, typename KernelType::Policy,
+                                   typename KernelType::functor_type,
+                                   Kokkos::ParallelReduceTag,
+                                   typename KernelType::reducer_type>> {};
+
+//==============================================================================
+// <editor-fold desc="get_cuda_graph_*() helper functions"> {{{1
+
+template <class KernelType>
+auto* allocate_driver_storage_for_kernel(KernelType const& kernel) {
+  using graph_node_kernel_t =
+      typename get_graph_node_kernel_type<KernelType>::type;
+  auto const& kernel_as_graph_kernel =
+      static_cast<graph_node_kernel_t const&>(kernel);
+  // TODO @graphs we need to somehow indicate the need for a fence in the
+  //              destructor of the GraphImpl object (so that we don't have to
+  //              just always do it)
+  return kernel_as_graph_kernel.allocate_driver_memory_buffer();
+}
+
+template <class KernelType>
+auto const& get_cuda_graph_from_kernel(KernelType const& kernel) {
+  using graph_node_kernel_t =
+      typename get_graph_node_kernel_type<KernelType>::type;
+  auto const& kernel_as_graph_kernel =
+      static_cast<graph_node_kernel_t const&>(kernel);
+  cudaGraph_t const* graph_ptr = kernel_as_graph_kernel.get_cuda_graph_ptr();
+  KOKKOS_EXPECTS(graph_ptr != nullptr);
+  return *graph_ptr;
+}
+
+template <class KernelType>
+auto& get_cuda_graph_node_from_kernel(KernelType const& kernel) {
+  using graph_node_kernel_t =
+      typename get_graph_node_kernel_type<KernelType>::type;
+  auto const& kernel_as_graph_kernel =
+      static_cast<graph_node_kernel_t const&>(kernel);
+  auto* graph_node_ptr = kernel_as_graph_kernel.get_cuda_graph_node_ptr();
+  KOKKOS_EXPECTS(graph_node_ptr != nullptr);
+  return *graph_node_ptr;
+}
+
+// </editor-fold> end get_cuda_graph_*() helper functions }}}1
+//==============================================================================
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_CUDA_ENABLE_GRAPHS)
+#endif  // KOKKOS_KOKKOS_CUDA_GRAPHNODEKERNEL_IMPL_HPP
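
The get_graph_node_kernel_type helpers above recover the graph-node kernel
type from a pattern implementation by tag dispatch over an identity
metafunction. A minimal, self-contained sketch of that technique follows; the
names (ForTag, ReduceTag, select_kernel, wrapped_reduce, MyKernel) are
hypothetical stand-ins, not Kokkos types:

    #include <type_traits>

    template <class T>
    struct identity { using type = T; };

    struct ForTag {};     // stand-in for a parallel_for pattern tag
    struct ReduceTag {};  // stand-in for Kokkos::ParallelReduceTag

    // Primary template: the kernel type is used as-is.
    template <class Kernel, class Tag>
    struct select_kernel : identity<Kernel> {};

    // Reductions select a different wrapper type, just as the
    // ParallelReduceTag specialization above carries the reducer type.
    template <class Kernel>
    struct wrapped_reduce {};
    template <class Kernel>
    struct select_kernel<Kernel, ReduceTag> : identity<wrapped_reduce<Kernel>> {};

    struct MyKernel {};
    static_assert(
        std::is_same<select_kernel<MyKernel, ForTag>::type, MyKernel>::value, "");
    static_assert(std::is_same<select_kernel<MyKernel, ReduceTag>::type,
                               wrapped_reduce<MyKernel>>::value, "");

    int main() { return 0; }
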
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_GraphNode_Impl.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_GraphNode_Impl.hpp
new file mode 100644 (file)
index 0000000..f4539cd
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_CUDA_GRAPHNODE_IMPL_HPP
+#define KOKKOS_KOKKOS_CUDA_GRAPHNODE_IMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#if defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_CUDA_ENABLE_GRAPHS)
+
+#include <Kokkos_Graph_fwd.hpp>
+
+#include <impl/Kokkos_GraphImpl.hpp>  // GraphAccess needs to be complete
+
+#include <Kokkos_Cuda.hpp>
+#include <cuda_runtime_api.h>
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+struct GraphNodeBackendSpecificDetails<Kokkos::Cuda> {
+  cudaGraphNode_t node = nullptr;
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Ctors, destructor, and assignment"> {{{2
+
+  explicit GraphNodeBackendSpecificDetails() = default;
+
+  explicit GraphNodeBackendSpecificDetails(
+      _graph_node_is_root_ctor_tag) noexcept {}
+
+  // </editor-fold> end Ctors, destructor, and assignment }}}2
+  //----------------------------------------------------------------------------
+};
+
+template <class Kernel, class PredecessorRef>
+struct GraphNodeBackendDetailsBeforeTypeErasure<Kokkos::Cuda, Kernel,
+                                                PredecessorRef> {
+ protected:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="ctors, destructor, and assignment"> {{{2
+
+  GraphNodeBackendDetailsBeforeTypeErasure(
+      Kokkos::Cuda const&, Kernel&, PredecessorRef const&,
+      GraphNodeBackendSpecificDetails<Kokkos::Cuda>&) noexcept {}
+
+  GraphNodeBackendDetailsBeforeTypeErasure(
+      Kokkos::Cuda const&, _graph_node_is_root_ctor_tag,
+      GraphNodeBackendSpecificDetails<Kokkos::Cuda>&) noexcept {}
+
+  // </editor-fold> end ctors, destructor, and assignment }}}2
+  //----------------------------------------------------------------------------
+};
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#include <Cuda/Kokkos_Cuda_GraphNodeKernel.hpp>
+
+#endif  // defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_CUDA_ENABLE_GRAPHS)
+#endif  // KOKKOS_KOKKOS_CUDA_GRAPHNODE_IMPL_HPP
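
Both constructors above are selected by empty tag types rather than by
argument values, the idiom used throughout the graph implementation
(_graph_node_is_root_ctor_tag and friends). A minimal sketch of that idiom
with hypothetical names (root_tag, kernel_tag, Node):

    #include <iostream>

    struct root_tag {};    // selects the "root node" constructor
    struct kernel_tag {};  // selects the "kernel node" constructor

    struct Node {
      bool is_root;
      explicit Node(root_tag) noexcept : is_root(true) {}
      explicit Node(kernel_tag) noexcept : is_root(false) {}
    };

    int main() {
      Node r{root_tag{}};
      Node k{kernel_tag{}};
      std::cout << r.is_root << " " << k.is_root << "\n";  // prints "1 0"
    }
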
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Graph_Impl.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Graph_Impl.hpp
new file mode 100644 (file)
index 0000000..bd514f5
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_CUDA_GRAPH_IMPL_HPP
+#define KOKKOS_KOKKOS_CUDA_GRAPH_IMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#if defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_CUDA_ENABLE_GRAPHS)
+
+#include <Kokkos_Graph_fwd.hpp>
+
+#include <impl/Kokkos_GraphImpl.hpp>  // GraphAccess needs to be complete
+
+// GraphNodeImpl needs to be complete because GraphImpl here is a full
+// specialization and not just a partial one
+#include <impl/Kokkos_GraphNodeImpl.hpp>
+#include <Cuda/Kokkos_Cuda_GraphNode_Impl.hpp>
+
+#include <Kokkos_Cuda.hpp>
+#include <cuda_runtime_api.h>
+#include <Cuda/Kokkos_Cuda_Error.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+struct GraphImpl<Kokkos::Cuda> {
+ public:
+  using execution_space = Kokkos::Cuda;
+
+ private:
+  execution_space m_execution_space;
+  cudaGraph_t m_graph          = nullptr;
+  cudaGraphExec_t m_graph_exec = nullptr;
+
+  using cuda_graph_flags_t = unsigned int;
+
+  using node_details_t = GraphNodeBackendSpecificDetails<Kokkos::Cuda>;
+
+  void _instantiate_graph() {
+    constexpr size_t error_log_size = 256;
+    cudaGraphNode_t error_node      = nullptr;
+    char error_log[error_log_size];
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGraphInstantiate(
+        &m_graph_exec, m_graph, &error_node, error_log, error_log_size));
+    // TODO @graphs print out errors
+  }
+
+ public:
+  using root_node_impl_t =
+      GraphNodeImpl<Kokkos::Cuda, Kokkos::Experimental::TypeErasedTag,
+                    Kokkos::Experimental::TypeErasedTag>;
+  using aggregate_kernel_impl_t = CudaGraphNodeAggregateKernel;
+  using aggregate_node_impl_t =
+      GraphNodeImpl<Kokkos::Cuda, aggregate_kernel_impl_t,
+                    Kokkos::Experimental::TypeErasedTag>;
+
+  // Not moveable or copyable; it spends its whole life as a shared_ptr in the
+  // Graph object
+  GraphImpl()                 = delete;
+  GraphImpl(GraphImpl const&) = delete;
+  GraphImpl(GraphImpl&&)      = delete;
+  GraphImpl& operator=(GraphImpl const&) = delete;
+  GraphImpl& operator=(GraphImpl&&) = delete;
+  ~GraphImpl() {
+    // TODO @graphs we need to somehow indicate the need for a fence in the
+    //              destructor of the GraphImpl object (so that we don't have to
+    //              just always do it)
+    m_execution_space.fence("Kokkos::GraphImpl::~GraphImpl: Graph Destruction");
+    KOKKOS_EXPECTS(bool(m_graph))
+    if (bool(m_graph_exec)) {
+      KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGraphExecDestroy(m_graph_exec));
+    }
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGraphDestroy(m_graph));
+  }
+
+  explicit GraphImpl(Kokkos::Cuda arg_instance)
+      : m_execution_space(std::move(arg_instance)) {
+    KOKKOS_IMPL_CUDA_SAFE_CALL(
+        cudaGraphCreate(&m_graph, cuda_graph_flags_t{0}));
+  }
+
+  void add_node(std::shared_ptr<aggregate_node_impl_t> const& arg_node_ptr) {
+    // All of the predecessors are just added as normal, so all we need to
+    // do here is add an empty node
+    KOKKOS_IMPL_CUDA_SAFE_CALL(
+        cudaGraphAddEmptyNode(&(arg_node_ptr->node_details_t::node), m_graph,
+                              /* dependencies = */ nullptr,
+                              /* numDependencies = */ 0));
+  }
+
+  template <class NodeImpl>
+  //  requires NodeImpl is a specialization of GraphNodeImpl, i.e. that
+  //  arg_node_ptr is a shared_ptr to such a specialization.
+  //  Also requires that the kernel has the graph kernel tag in its policy.
+  void add_node(std::shared_ptr<NodeImpl> const& arg_node_ptr) {
+    static_assert(
+        NodeImpl::kernel_type::Policy::is_graph_kernel::value,
+        "GraphImpl::add_node requires a kernel whose execution policy is "
+        "marked as a graph kernel (Policy::is_graph_kernel).");
+    KOKKOS_EXPECTS(bool(arg_node_ptr));
+    // The Kernel launch from the execute() method has been shimmed to insert
+    // the node into the graph
+    auto& kernel = arg_node_ptr->get_kernel();
+    // note: using arg_node_ptr->node_details_t::node caused an ICE in NVCC 10.1
+    auto& cuda_node = static_cast<node_details_t*>(arg_node_ptr.get())->node;
+    KOKKOS_EXPECTS(!bool(cuda_node));
+    kernel.set_cuda_graph_ptr(&m_graph);
+    kernel.set_cuda_graph_node_ptr(&cuda_node);
+    kernel.execute();
+    KOKKOS_ENSURES(bool(cuda_node));
+  }
+
+  template <class NodeImplPtr, class PredecessorRef>
+  // requires PredecessorRef is a specialization of GraphNodeRef that has
+  // already been added to this graph and NodeImpl is a specialization of
+  // GraphNodeImpl that has already been added to this graph.
+  void add_predecessor(NodeImplPtr arg_node_ptr, PredecessorRef arg_pred_ref) {
+    KOKKOS_EXPECTS(bool(arg_node_ptr))
+    auto pred_ptr = GraphAccess::get_node_ptr(arg_pred_ref);
+    KOKKOS_EXPECTS(bool(pred_ptr))
+
+    // clang-format off
+    // NOTE const-qualifiers below are commented out because of an API break
+    // from CUDA 10.0 to CUDA 10.1
+    // cudaGraphAddDependencies(cudaGraph_t, cudaGraphNode_t*, cudaGraphNode_t*, size_t)
+    // cudaGraphAddDependencies(cudaGraph_t, const cudaGraphNode_t*, const cudaGraphNode_t*, size_t)
+    // clang-format on
+    auto /*const*/& pred_cuda_node = pred_ptr->node_details_t::node;
+    KOKKOS_EXPECTS(bool(pred_cuda_node))
+
+    auto /*const*/& cuda_node = arg_node_ptr->node_details_t::node;
+    KOKKOS_EXPECTS(bool(cuda_node))
+
+    KOKKOS_IMPL_CUDA_SAFE_CALL(
+        cudaGraphAddDependencies(m_graph, &pred_cuda_node, &cuda_node, 1));
+  }
+
+  void submit() {
+    if (!bool(m_graph_exec)) {
+      _instantiate_graph();
+    }
+    KOKKOS_IMPL_CUDA_SAFE_CALL(
+        cudaGraphLaunch(m_graph_exec, m_execution_space.cuda_stream()));
+  }
+
+  execution_space const& get_execution_space() const noexcept {
+    return m_execution_space;
+  }
+
+  auto create_root_node_ptr() {
+    KOKKOS_EXPECTS(bool(m_graph))
+    KOKKOS_EXPECTS(!bool(m_graph_exec))
+    auto rv = std::make_shared<root_node_impl_t>(
+        get_execution_space(), _graph_node_is_root_ctor_tag{});
+    KOKKOS_IMPL_CUDA_SAFE_CALL(
+        cudaGraphAddEmptyNode(&(rv->node_details_t::node), m_graph,
+                              /* dependencies = */ nullptr,
+                              /* numDependencies = */ 0));
+    KOKKOS_ENSURES(bool(rv->node_details_t::node))
+    return rv;
+  }
+
+  template <class... PredecessorRefs>
+  // See requirements/expectations in GraphBuilder
+  auto create_aggregate_ptr(PredecessorRefs&&...) {
+    // The attachment to predecessors, which is all we really need, happens
+    // in the generic layer, which calls through to add_predecessor for
+    // each predecessor ref, so all we need to do here is create the (trivial)
+    // aggregate node.
+    return std::make_shared<aggregate_node_impl_t>(
+        m_execution_space, _graph_node_kernel_ctor_tag{},
+        aggregate_kernel_impl_t{});
+  }
+};
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_CUDA_ENABLE_GRAPHS)
+#endif  // KOKKOS_KOKKOS_CUDA_GRAPH_IMPL_HPP
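
GraphImpl<Kokkos::Cuda> above is a thin RAII wrapper over the CUDA graph API.
The standalone sketch below walks the same call sequence directly (create,
add empty nodes, add a dependency edge, instantiate, launch, destroy); it
uses the five-argument cudaGraphInstantiate signature of CUDA 10/11, matching
the call in _instantiate_graph, and only the default stream:

    #include <cuda_runtime_api.h>
    #include <cstdio>
    #include <cstdlib>

    #define CHECK(call)                                        \
      do {                                                     \
        cudaError_t e = (call);                                \
        if (e != cudaSuccess) {                                \
          std::fprintf(stderr, "%s\n", cudaGetErrorString(e)); \
          std::exit(1);                                        \
        }                                                      \
      } while (0)

    int main() {
      cudaGraph_t graph = nullptr;
      CHECK(cudaGraphCreate(&graph, 0));  // GraphImpl constructor

      cudaGraphNode_t root = nullptr, child = nullptr;
      CHECK(cudaGraphAddEmptyNode(&root, graph, nullptr, 0));    // create_root_node_ptr
      CHECK(cudaGraphAddEmptyNode(&child, graph, nullptr, 0));   // add_node (aggregate)
      CHECK(cudaGraphAddDependencies(graph, &root, &child, 1));  // add_predecessor

      cudaGraphExec_t exec = nullptr;
      cudaGraphNode_t error_node = nullptr;
      char error_log[256];
      CHECK(cudaGraphInstantiate(&exec, graph, &error_node, error_log,
                                 sizeof error_log));         // _instantiate_graph
      CHECK(cudaGraphLaunch(exec, /* stream = */ nullptr));  // submit
      CHECK(cudaStreamSynchronize(nullptr));

      CHECK(cudaGraphExecDestroy(exec));  // ~GraphImpl
      CHECK(cudaGraphDestroy(graph));
      return 0;
    }
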
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Half_Conversion.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Half_Conversion.hpp
new file mode 100644 (file)
index 0000000..40a2635
--- /dev/null
@@ -0,0 +1,543 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_HALF_HPP_
+#define KOKKOS_CUDA_HALF_HPP_
+
+#ifdef KOKKOS_IMPL_CUDA_HALF_TYPE_DEFINED
+
+#include <Kokkos_Half.hpp>
+#include <Kokkos_NumericTraits.hpp>  // reduction_identity
+
+#if CUDA_VERSION >= 11000
+#include <cuda_bf16.h>
+#endif
+
+namespace Kokkos {
+namespace Experimental {
+
+/************************** half conversions **********************************/
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(half_t val) { return val; }
+
+// CUDA before 11.1 only marks the half <-> float conversions as host/device,
+// so on the host we largely go through float for the conversion, while on
+// the device we still call the dedicated conversion intrinsics.
+#if (CUDA_VERSION < 11010)
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(float val) { return half_t(__float2half(val)); }
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(bool val) { return cast_to_half(static_cast<float>(val)); }
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(double val) {
+  // __double2half was also only introduced in CUDA 11
+  return half_t(__float2half(static_cast<float>(val)));
+}
+
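+// KOKKOS_IF_ON_DEVICE / KOKKOS_IF_ON_HOST below emit their argument only in
+// the device or host compilation pass, respectively, so each overload picks
+// the native __half intrinsic on the device and the float detour on the host
+// from a single definition.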
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(short val) {
+  KOKKOS_IF_ON_DEVICE((return half_t(__short2half_rn(val));))
+  KOKKOS_IF_ON_HOST((return half_t(__float2half(static_cast<float>(val)));))
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned short val) {
+  KOKKOS_IF_ON_DEVICE((return half_t(__ushort2half_rn(val));))
+  KOKKOS_IF_ON_HOST((return half_t(__float2half(static_cast<float>(val)));))
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(int val) {
+  KOKKOS_IF_ON_DEVICE((return half_t(__int2half_rn(val));))
+  KOKKOS_IF_ON_HOST((return half_t(__float2half(static_cast<float>(val)));))
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned int val) {
+  KOKKOS_IF_ON_DEVICE((return half_t(__uint2half_rn(val));))
+  KOKKOS_IF_ON_HOST((return half_t(__float2half(static_cast<float>(val)));))
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long long val) {
+  KOKKOS_IF_ON_DEVICE((return half_t(__ll2half_rn(val));))
+  KOKKOS_IF_ON_HOST((return half_t(__float2half(static_cast<float>(val)));))
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long long val) {
+  KOKKOS_IF_ON_DEVICE((return half_t(__ull2half_rn(val));))
+  KOKKOS_IF_ON_HOST((return half_t(__float2half(static_cast<float>(val)));))
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long val) {
+  return cast_to_half(static_cast<long long>(val));
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long val) {
+  return cast_to_half(static_cast<unsigned long long>(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, float>::value, T>
+cast_from_half(half_t val) {
+  return __half2float(half_t::impl_type(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, bool>::value, T>
+cast_from_half(half_t val) {
+  return static_cast<T>(cast_from_half<float>(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, double>::value, T>
+cast_from_half(half_t val) {
+  return static_cast<T>(__half2float(half_t::impl_type(val)));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, short>::value, T>
+cast_from_half(half_t val) {
+  KOKKOS_IF_ON_DEVICE((return __half2short_rz(half_t::impl_type(val));))
+  KOKKOS_IF_ON_HOST(
+      (return static_cast<T>(__half2float(half_t::impl_type(val)));))
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned short>::value, T>
+    cast_from_half(half_t val) {
+  KOKKOS_IF_ON_DEVICE((return __half2ushort_rz(half_t::impl_type(val));))
+  KOKKOS_IF_ON_HOST(
+      (return static_cast<T>(__half2float(half_t::impl_type(val)));))
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, int>::value, T>
+cast_from_half(half_t val) {
+  KOKKOS_IF_ON_DEVICE((return __half2int_rz(half_t::impl_type(val));))
+  KOKKOS_IF_ON_HOST(
+      (return static_cast<T>(__half2float(half_t::impl_type(val)));))
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, unsigned>::value, T>
+cast_from_half(half_t val) {
+  KOKKOS_IF_ON_DEVICE((return __half2uint_rz(half_t::impl_type(val));))
+  KOKKOS_IF_ON_HOST(
+      (return static_cast<T>(__half2float(half_t::impl_type(val)));))
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long long>::value, T>
+cast_from_half(half_t val) {
+  KOKKOS_IF_ON_DEVICE((return __half2ll_rz(half_t::impl_type(val));))
+  KOKKOS_IF_ON_HOST(
+      (return static_cast<T>(__half2float(half_t::impl_type(val)));))
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long long>::value, T>
+    cast_from_half(half_t val) {
+  KOKKOS_IF_ON_DEVICE((return __half2ull_rz(half_t::impl_type(val));))
+  KOKKOS_IF_ON_HOST(
+      (return static_cast<T>(__half2float(half_t::impl_type(val)));))
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long>::value, T>
+cast_from_half(half_t val) {
+  return static_cast<T>(cast_from_half<long long>(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long>::value, T>
+    cast_from_half(half_t val) {
+  return static_cast<T>(cast_from_half<unsigned long long>(val));
+}
+
+#else  // CUDA 11.1+ versions follow
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(float val) { return __float2half(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(double val) { return __double2half(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(short val) { return __short2half_rn(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned short val) { return __ushort2half_rn(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(int val) { return __int2half_rn(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned int val) { return __uint2half_rn(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long long val) { return __ll2half_rn(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long long val) { return __ull2half_rn(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long val) {
+  return cast_to_half(static_cast<long long>(val));
+}
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long val) {
+  return cast_to_half(static_cast<unsigned long long>(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, float>::value, T>
+cast_from_half(half_t val) {
+  return __half2float(__half(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, double>::value, T>
+cast_from_half(half_t val) {
+  return static_cast<double>(__half2float(__half(val)));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, short>::value, T>
+cast_from_half(half_t val) {
+  return __half2short_rz(__half(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned short>::value, T>
+    cast_from_half(half_t val) {
+  return __half2ushort_rz(__half(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, int>::value, T>
+cast_from_half(half_t val) {
+  return __half2int_rz(__half(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, unsigned int>::value, T>
+cast_from_half(half_t val) {
+  return __half2uint_rz(__half(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long long>::value, T>
+cast_from_half(half_t val) {
+  return __half2ll_rz(__half(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long long>::value, T>
+    cast_from_half(half_t val) {
+  return __half2ull_rz(__half(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long>::value, T>
+cast_from_half(half_t val) {
+  return static_cast<T>(cast_from_half<long long>(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long>::value, T>
+    cast_from_half(half_t val) {
+  return static_cast<T>(cast_from_half<unsigned long long>(val));
+}
+#endif
+
+/************************** bhalf conversions *********************************/
+// This branch is taken if the CUDA version is at least 11.0 and either less
+// than 11.1 or the architecture is not Ampere.
+#if CUDA_VERSION >= 11000 && \
+    (CUDA_VERSION < 11010 || !defined(KOKKOS_ARCH_AMPERE))
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(bhalf_t val) { return val; }
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(float val) { return bhalf_t(__float2bfloat16(val)); }
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(bool val) {
+  return cast_to_bhalf(static_cast<float>(val));
+}
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(double val) {
+  // __double2bfloat16 was also only introduced in CUDA 11
+  return bhalf_t(__float2bfloat16(static_cast<float>(val)));
+}
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(short val) {
+  return bhalf_t(__float2bfloat16(static_cast<float>(val)));
+}
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned short val) {
+  return bhalf_t(__float2bfloat16(static_cast<float>(val)));
+}
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(int val) {
+  return bhalf_t(__float2bfloat16(static_cast<float>(val)));
+}
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned int val) {
+  return bhalf_t(__float2bfloat16(static_cast<float>(val)));
+}
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(long long val) {
+  return bhalf_t(__float2bfloat16(static_cast<float>(val)));
+}
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned long long val) {
+  return bhalf_t(__float2bfloat16(static_cast<float>(val)));
+}
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(long val) {
+  return cast_to_bhalf(static_cast<long long>(val));
+}
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned long val) {
+  return cast_to_bhalf(static_cast<unsigned long long>(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, float>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return __bfloat162float(bhalf_t::impl_type(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, bool>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return static_cast<T>(cast_from_bhalf<float>(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, double>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return static_cast<T>(__bfloat162float(bhalf_t::impl_type(val)));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, short>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return static_cast<T>(__bfloat162float(bhalf_t::impl_type(val)));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned short>::value, T>
+    cast_from_bhalf(bhalf_t val) {
+  return static_cast<T>(__bfloat162float(bhalf_t::impl_type(val)));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, int>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return static_cast<T>(__bfloat162float(bhalf_t::impl_type(val)));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, unsigned>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return static_cast<T>(__bfloat162float(bhalf_t::impl_type(val)));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long long>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return static_cast<T>(__bfloat162float(bhalf_t::impl_type(val)));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long long>::value, T>
+    cast_from_bhalf(bhalf_t val) {
+  return static_cast<T>(__bfloat162float(bhalf_t::impl_type(val)));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return static_cast<T>(cast_from_bhalf<long long>(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long>::value, T>
+    cast_from_bhalf(bhalf_t val) {
+  return static_cast<T>(cast_from_bhalf<unsigned long long>(val));
+}
+#endif  // CUDA_VERSION >= 11000 && (CUDA_VERSION < 11010 || !KOKKOS_ARCH_AMPERE)
+
+#if CUDA_VERSION >= 11010 && \
+    (defined(KOKKOS_ARCH_AMPERE80) || defined(KOKKOS_ARCH_AMPERE86))
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(bhalf_t val) { return val; }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(float val) { return __float2bfloat16(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(double val) { return __double2bfloat16(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(short val) { return __short2bfloat16_rn(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned short val) { return __ushort2bfloat16_rn(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(int val) { return __int2bfloat16_rn(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned int val) { return __uint2bfloat16_rn(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(long long val) { return __ll2bfloat16_rn(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned long long val) { return __ull2bfloat16_rn(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(long val) {
+  return cast_to_bhalf(static_cast<long long>(val));
+}
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned long val) {
+  return cast_to_bhalf(static_cast<unsigned long long>(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, float>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return __bfloat162float(__nv_bfloat16(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, double>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return static_cast<double>(__bfloat162float(__nv_bfloat16(val)));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, short>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return __bfloat162short_rz(__nv_bfloat16(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned short>::value, T>
+    cast_from_bhalf(bhalf_t val) {
+  return __bfloat162ushort_rz(__nv_bfloat16(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, int>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return __bfloat162int_rz(__nv_bfloat16(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, unsigned int>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return __bfloat162uint_rz(__nv_bfloat16(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long long>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return __bfloat162ll_rz(__nv_bfloat16(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long long>::value, T>
+    cast_from_bhalf(bhalf_t val) {
+  return __bfloat162ull_rz(__nv_bfloat16(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long>::value, T>
+cast_from_bhalf(bhalf_t val) {
+  return static_cast<T>(cast_from_bhalf<long long>(val));
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long>::value, T>
+    cast_from_bhalf(bhalf_t val) {
+  return static_cast<T>(cast_from_bhalf<unsigned long long>(val));
+}
+#endif  // CUDA_VERSION >= 11010 && (KOKKOS_ARCH_AMPERE80 || KOKKOS_ARCH_AMPERE86)
+}  // namespace Experimental
+
+#if (CUDA_VERSION >= 11000)
+template <>
+struct reduction_identity<Kokkos::Experimental::bhalf_t> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float sum() noexcept {
+    return 0.0F;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float prod() noexcept {
+    return 1.0F;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float max() noexcept {
+    return -0x7f7f;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float min() noexcept {
+    return 0x7f7f;
+  }
+};
+#endif  // CUDA_VERSION >= 11000
+
+// use float as the return type for these identities since cuda_fp16.h
+// has no constexpr functions for casting to __half
+template <>
+struct reduction_identity<Kokkos::Experimental::half_t> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float sum() noexcept {
+    return 0.0F;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float prod() noexcept {
+    return 1.0F;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float max() noexcept {
+    return -65504.0F;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float min() noexcept {
+    return 65504.0F;
+  }
+};
+
+}  // namespace Kokkos
+#endif  // KOKKOS_IMPL_CUDA_HALF_TYPE_DEFINED
+#endif  // KOKKOS_CUDA_HALF_HPP_
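
A minimal usage sketch for the conversions above, assuming a CUDA-enabled
Kokkos build in which half_t is backed by __half (the view name and extent
are arbitrary):

    #include <Kokkos_Core.hpp>
    #include <cstdio>

    int main(int argc, char* argv[]) {
      Kokkos::initialize(argc, argv);
      {
        using Kokkos::Experimental::half_t;
        const int n = 1024;
        Kokkos::View<half_t*> x("x", n);

        // Store: the host path detours through float, the device path uses
        // the native __half intrinsics.
        Kokkos::parallel_for(
            n, KOKKOS_LAMBDA(int i) {
              x(i) = Kokkos::Experimental::cast_to_half(0.5 * i);
            });

        // Load: read back through cast_from_half<double>.
        double sum = 0.0;
        Kokkos::parallel_reduce(
            n,
            KOKKOS_LAMBDA(int i, double& s) {
              s += Kokkos::Experimental::cast_from_half<double>(x(i));
            },
            sum);
        std::printf("sum = %f\n", sum);
      }
      Kokkos::finalize();
    }
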
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Half_Impl_Type.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Half_Impl_Type.hpp
new file mode 100644 (file)
index 0000000..e296a92
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_HALF_IMPL_TYPE_HPP_
+#define KOKKOS_CUDA_HALF_IMPL_TYPE_HPP_
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+#if !(defined(KOKKOS_COMPILER_CLANG) && KOKKOS_COMPILER_CLANG < 900) && \
+    !(defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_MAXWELL50) ||  \
+      defined(KOKKOS_ARCH_MAXWELL52))
+#include <cuda_fp16.h>
+#if (CUDA_VERSION >= 11000)
+#include <cuda_bf16.h>
+#endif  // CUDA_VERSION >= 11000
+
+#ifndef KOKKOS_IMPL_HALF_TYPE_DEFINED
+// Make sure no one else tries to define half_t
+#define KOKKOS_IMPL_HALF_TYPE_DEFINED
+#define KOKKOS_IMPL_CUDA_HALF_TYPE_DEFINED
+
+namespace Kokkos {
+namespace Impl {
+struct half_impl_t {
+  using type = __half;
+};
+#if (CUDA_VERSION >= 11000)
+#define KOKKOS_IMPL_BHALF_TYPE_DEFINED
+struct bhalf_impl_t {
+  using type = __nv_bfloat16;
+};
+#endif  // CUDA_VERSION >= 11000
+}  // namespace Impl
+}  // namespace Kokkos
+#endif  // KOKKOS_IMPL_HALF_TYPE_DEFINED
+#endif  // half_t is disabled on CUDA for:
+        // Clang < 9 || KEPLER || MAXWELL50 || MAXWELL52
+#endif  // KOKKOS_ENABLE_CUDA
+#endif  // KOKKOS_CUDA_HALF_IMPL_TYPE_HPP_
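
Downstream headers key off the macros defined above to decide whether a
native half-precision storage type exists. An illustrative consumer-side
sketch (the float fallback branch is an assumption for illustration, not a
quote of Kokkos_Half.hpp):

    #include <Kokkos_Core.hpp>  // pulls in the backend half impl type on CUDA

    #ifdef KOKKOS_IMPL_HALF_TYPE_DEFINED
    // CUDA (or another backend) provided a native type, __half here.
    using half_storage = Kokkos::Impl::half_impl_t::type;
    #else
    // Hypothetical fallback when no backend defines a half type.
    using half_storage = float;
    #endif

    static_assert(sizeof(half_storage) <= sizeof(float),
                  "half storage is no wider than float");
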
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Instance.cpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Instance.cpp
new file mode 100644 (file)
index 0000000..5811498
--- /dev/null
@@ -0,0 +1,1010 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/*--------------------------------------------------------------------------*/
+/* Kokkos interfaces */
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+
+#include <Kokkos_Core.hpp>
+
+#include <Cuda/Kokkos_Cuda_Error.hpp>
+#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
+#include <Cuda/Kokkos_Cuda_Instance.hpp>
+#include <Cuda/Kokkos_Cuda_Locks.hpp>
+#include <Cuda/Kokkos_Cuda_UniqueToken.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_DeviceManagement.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+/*--------------------------------------------------------------------------*/
+/* Standard 'C' libraries */
+#include <cstdlib>
+
+/* Standard 'C++' libraries */
+#include <vector>
+#include <iostream>
+#include <sstream>
+#include <string>
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+namespace Kokkos {
+namespace Impl {
+
+bool CudaInternal::kokkos_impl_cuda_use_serial_execution_v = false;
+
+void CudaInternal::cuda_set_serial_execution(bool val) {
+  CudaInternal::kokkos_impl_cuda_use_serial_execution_v = val;
+}
+bool CudaInternal::cuda_use_serial_execution() {
+  return CudaInternal::kokkos_impl_cuda_use_serial_execution_v;
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+void kokkos_impl_cuda_set_serial_execution(bool val) {
+  Kokkos::Impl::CudaInternal::cuda_set_serial_execution(val);
+}
+bool kokkos_impl_cuda_use_serial_execution() {
+  return Kokkos::Impl::CudaInternal::cuda_use_serial_execution();
+}
+#endif
+
+#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
+
+__device__ __constant__ unsigned long kokkos_impl_cuda_constant_memory_buffer
+    [Kokkos::Impl::CudaTraits::ConstantMemoryUsage / sizeof(unsigned long)];
+
+#endif
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+namespace {
+
+__global__ void query_cuda_kernel_arch(int *d_arch) {
+#ifdef _NVHPC_CUDA
+  *d_arch = __builtin_current_device_sm() * 10;
+#else
+#if defined(__CUDA_ARCH__)
+  *d_arch = __CUDA_ARCH__;
+#else
+  *d_arch = 0;
+#endif
+#endif
+}
+
+/** Query the compute capability that kernels are actually compiled for, by
+    launching a kernel that reports its __CUDA_ARCH__: */
+int cuda_kernel_arch() {
+  int arch    = 0;
+  int *d_arch = nullptr;
+
+  cudaMalloc(reinterpret_cast<void **>(&d_arch), sizeof(int));
+  cudaMemcpy(d_arch, &arch, sizeof(int), cudaMemcpyDefault);
+
+  query_cuda_kernel_arch<<<1, 1>>>(d_arch);
+
+  cudaMemcpy(&arch, d_arch, sizeof(int), cudaMemcpyDefault);
+  cudaFree(d_arch);
+  return arch;
+}
+
+}  // namespace
+
+Kokkos::View<uint32_t *, Kokkos::CudaSpace> cuda_global_unique_token_locks(
+    bool deallocate) {
+  static Kokkos::View<uint32_t *, Kokkos::CudaSpace> locks =
+      Kokkos::View<uint32_t *, Kokkos::CudaSpace>();
+  if (!deallocate && locks.extent(0) == 0)
+    locks = Kokkos::View<uint32_t *, Kokkos::CudaSpace>(
+        "Kokkos::UniqueToken<Cuda>::m_locks", Kokkos::Cuda().concurrency());
+  if (deallocate) locks = Kokkos::View<uint32_t *, Kokkos::CudaSpace>();
+  return locks;
+}
+
+void cuda_device_synchronize(const std::string &name) {
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Cuda>(
+      name,
+      Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+          GlobalDeviceSynchronization,
+      []() {  // TODO: correct device ID
+        KOKKOS_IMPL_CUDA_SAFE_CALL(cudaDeviceSynchronize());
+      });
+}
+
+void cuda_stream_synchronize(const cudaStream_t stream, const CudaInternal *ptr,
+                             const std::string &name) {
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Cuda>(
+      name,
+      Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{
+          ptr->impl_get_instance_id()},
+      [&]() {  // TODO: correct device ID
+        KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamSynchronize(stream));
+      });
+}
+
+void cuda_stream_synchronize(
+    const cudaStream_t stream,
+    Kokkos::Tools::Experimental::SpecialSynchronizationCases reason,
+    const std::string &name) {
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Cuda>(
+      name, reason, [&]() {  // TODO: correct device ID
+        KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamSynchronize(stream));
+      });
+}
+
+void cuda_internal_error_throw(cudaError e, const char *name, const char *file,
+                               const int line) {
+  std::ostringstream out;
+  out << name << " error( " << cudaGetErrorName(e)
+      << "): " << cudaGetErrorString(e);
+  if (file) {
+    out << " " << file << ":" << line;
+  }
+  throw_runtime_exception(out.str());
+}
+
+void cuda_internal_error_abort(cudaError e, const char *name, const char *file,
+                               const int line) {
+  std::ostringstream out;
+  out << name << " error( " << cudaGetErrorName(e)
+      << "): " << cudaGetErrorString(e);
+  if (file) {
+    out << " " << file << ":" << line;
+  }
+  abort(out.str().c_str());
+}
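+
+// The KOKKOS_IMPL_CUDA_SAFE_CALL macro that feeds these two helpers lives in
+// Kokkos_Cuda_Error.hpp; a sketch of how such a wrapper is typically wired up
+// (illustrative only, not the actual macro):
+//
+//   #define CUDA_SAFE_CALL(call)                                   \
+//     do {                                                         \
+//       cudaError_t e = (call);                                    \
+//       if (e != cudaSuccess)                                      \
+//         cuda_internal_error_throw(e, #call, __FILE__, __LINE__); \
+//     } while (0)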
+
+//----------------------------------------------------------------------------
+// Some significant cuda device properties:
+//
+// cudaDeviceProp::name                : Text label for device
+// cudaDeviceProp::major               : Device major number
+// cudaDeviceProp::minor               : Device minor number
+// cudaDeviceProp::warpSize            : number of threads per warp
+// cudaDeviceProp::multiProcessorCount : number of multiprocessors
+// cudaDeviceProp::sharedMemPerBlock   : capacity of shared memory per block
+// cudaDeviceProp::totalConstMem       : capacity of constant memory
+// cudaDeviceProp::totalGlobalMem      : capacity of global memory
+// cudaDeviceProp::maxGridSize[3]      : maximum grid size
+
+//
+//  Section 4.4.2.4 of the CUDA Toolkit Reference Manual
+//
+// struct cudaDeviceProp {
+//   char name[256];
+//   size_t totalGlobalMem;
+//   size_t sharedMemPerBlock;
+//   int regsPerBlock;
+//   int warpSize;
+//   size_t memPitch;
+//   int maxThreadsPerBlock;
+//   int maxThreadsDim[3];
+//   int maxGridSize[3];
+//   size_t totalConstMem;
+//   int major;
+//   int minor;
+//   int clockRate;
+//   size_t textureAlignment;
+//   int deviceOverlap;
+//   int multiProcessorCount;
+//   int kernelExecTimeoutEnabled;
+//   int integrated;
+//   int canMapHostMemory;
+//   int computeMode;
+//   int concurrentKernels;
+//   int ECCEnabled;
+//   int pciBusID;
+//   int pciDeviceID;
+//   int tccDriver;
+//   int asyncEngineCount;
+//   int unifiedAddressing;
+//   int memoryClockRate;
+//   int memoryBusWidth;
+//   int l2CacheSize;
+//   int maxThreadsPerMultiProcessor;
+// };
+
+namespace {
+
+class CudaInternalDevices {
+ public:
+  enum { MAXIMUM_DEVICE_COUNT = 64 };
+  struct cudaDeviceProp m_cudaProp[MAXIMUM_DEVICE_COUNT];
+  int m_cudaDevCount;
+
+  CudaInternalDevices();
+
+  static const CudaInternalDevices &singleton();
+};
+
+CudaInternalDevices::CudaInternalDevices() {
+  // See 'cudaSetDeviceFlags' for host-device thread interaction
+  // Section 4.4.2.6 of the CUDA Toolkit Reference Manual
+
+  KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetDeviceCount(&m_cudaDevCount));
+
+  if (m_cudaDevCount > MAXIMUM_DEVICE_COUNT) {
+    Kokkos::abort(
+        "Sorry, you have more GPUs per node than we thought anybody would ever "
+        "have. Please report this to github.com/kokkos/kokkos.");
+  }
+  for (int i = 0; i < m_cudaDevCount; ++i) {
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetDeviceProperties(m_cudaProp + i, i));
+  }
+}
+
+const CudaInternalDevices &CudaInternalDevices::singleton() {
+  static CudaInternalDevices self;
+  return self;
+}
+
+}  // namespace
+
+unsigned long *CudaInternal::constantMemHostStaging = nullptr;
+cudaEvent_t CudaInternal::constantMemReusable       = nullptr;
+std::mutex CudaInternal::constantMemMutex;
+
+//----------------------------------------------------------------------------
+
+void CudaInternal::print_configuration(std::ostream &s) const {
+  const CudaInternalDevices &dev_info = CudaInternalDevices::singleton();
+
+#if defined(KOKKOS_ENABLE_CUDA)
+  s << "macro  KOKKOS_ENABLE_CUDA      : defined\n";
+#endif
+#if defined(CUDA_VERSION)
+  s << "macro  CUDA_VERSION          = " << CUDA_VERSION << " = version "
+    << CUDA_VERSION / 1000 << "." << (CUDA_VERSION % 1000) / 10 << '\n';
+#endif
+
+  for (int i = 0; i < dev_info.m_cudaDevCount; ++i) {
+    s << "Kokkos::Cuda[ " << i << " ] " << dev_info.m_cudaProp[i].name
+      << " capability " << dev_info.m_cudaProp[i].major << "."
+      << dev_info.m_cudaProp[i].minor << ", Total Global Memory: "
+      << human_memory_size(dev_info.m_cudaProp[i].totalGlobalMem)
+      << ", Shared Memory per Block: "
+      << human_memory_size(dev_info.m_cudaProp[i].sharedMemPerBlock);
+    if (m_cudaDev == i) s << " : Selected";
+    s << std::endl;
+  }
+}
+
+//----------------------------------------------------------------------------
+
+CudaInternal::~CudaInternal() {
+  if (m_stream || m_scratchSpace || m_scratchFlags || m_scratchUnified) {
+    std::cerr << "Kokkos::Cuda ERROR: Failed to call Kokkos::Cuda::finalize()"
+              << std::endl;
+  }
+
+  m_cudaDev                 = -1;
+  m_cudaArch                = -1;
+  m_multiProcCount          = 0;
+  m_maxWarpCount            = 0;
+  m_maxBlock                = {0, 0, 0};
+  m_maxSharedWords          = 0;
+  m_maxConcurrency          = 0;
+  m_scratchSpaceCount       = 0;
+  m_scratchFlagsCount       = 0;
+  m_scratchUnifiedCount     = 0;
+  m_scratchUnifiedSupported = 0;
+  m_streamCount             = 0;
+  m_scratchSpace            = nullptr;
+  m_scratchFlags            = nullptr;
+  m_scratchUnified          = nullptr;
+  m_stream                  = nullptr;
+  for (int i = 0; i < m_n_team_scratch; ++i) {
+    m_team_scratch_current_size[i] = 0;
+    m_team_scratch_ptr[i]          = nullptr;
+  }
+}
+
+int CudaInternal::verify_is_initialized(const char *const label) const {
+  if (m_cudaDev < 0) {
+    Kokkos::abort((std::string("Kokkos::Cuda::") + label +
+                   " : ERROR device not initialized\n")
+                      .c_str());
+  }
+  return 0 <= m_cudaDev;
+}
+uint32_t CudaInternal::impl_get_instance_id() const { return m_instance_id; }
+CudaInternal &CudaInternal::singleton() {
+  static CudaInternal self;
+  return self;
+}
+void CudaInternal::fence(const std::string &name) const {
+  Impl::cuda_stream_synchronize(m_stream, this, name);
+}
+void CudaInternal::fence() const {
+  fence("Kokkos::CudaInternal::fence(): Unnamed Instance Fence");
+}
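+
+// Usage sketch (illustrative, not part of this file): an instance fence waits
+// only on work submitted to this instance's stream, e.g.
+//
+//   Kokkos::Cuda exec;
+//   Kokkos::parallel_for(Kokkos::RangePolicy<Kokkos::Cuda>(exec, 0, n), f);
+//   exec.fence("wait for f");         // fences exec's stream only
+//   Kokkos::fence("global barrier");  // fences all execution space instances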
+
+void CudaInternal::initialize(int cuda_device_id, cudaStream_t stream,
+                              bool manage_stream) {
+  if (was_finalized)
+    Kokkos::abort("Calling Cuda::initialize after Cuda::finalize is illegal\n");
+  was_initialized = true;
+  if (is_initialized()) return;
+
+  enum { WordSize = sizeof(size_type) };
+
+#ifndef KOKKOS_IMPL_TURN_OFF_CUDA_HOST_INIT_CHECK
+  if (!HostSpace::execution_space::impl_is_initialized()) {
+    const std::string msg(
+        "Cuda::initialize ERROR : HostSpace::execution_space is not "
+        "initialized");
+    throw_runtime_exception(msg);
+  }
+#endif
+
+  const CudaInternalDevices &dev_info = CudaInternalDevices::singleton();
+
+  const bool ok_init = nullptr == m_scratchSpace || nullptr == m_scratchFlags;
+
+  const bool ok_id =
+      0 <= cuda_device_id && cuda_device_id < dev_info.m_cudaDevCount;
+
+  // Need device capability 3.0 or better
+
+  const bool ok_dev =
+      ok_id && (3 <= dev_info.m_cudaProp[cuda_device_id].major &&
+                0 <= dev_info.m_cudaProp[cuda_device_id].minor);
+
+  if (ok_init && ok_dev) {
+    const struct cudaDeviceProp &cudaProp = dev_info.m_cudaProp[cuda_device_id];
+
+    m_cudaDev    = cuda_device_id;
+    m_deviceProp = cudaProp;
+
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaSetDevice(m_cudaDev));
+    Kokkos::Impl::cuda_device_synchronize(
+        "Kokkos::CudaInternal::initialize: Fence on space initialization");
+
+    // Query what compute capability architecture a kernel executes:
+    m_cudaArch = cuda_kernel_arch();
+
+    if (m_cudaArch == 0) {
+      std::stringstream ss;
+      ss << "Kokkos::Cuda::initialize ERROR: likely mismatch of architecture\n";
+      std::string msg = ss.str();
+      Kokkos::abort(msg.c_str());
+    }
+
+    int compiled_major = m_cudaArch / 100;
+    int compiled_minor = (m_cudaArch % 100) / 10;
+
+    if (compiled_major != cudaProp.major || compiled_minor > cudaProp.minor) {
+      std::stringstream ss;
+      ss << "Kokkos::Cuda::initialize ERROR: running kernels compiled for "
+            "compute capability "
+         << compiled_major << "." << compiled_minor
+         << " on device with compute capability " << cudaProp.major << "."
+         << cudaProp.minor << " is not supported by CUDA!\n";
+      std::string msg = ss.str();
+      Kokkos::abort(msg.c_str());
+    }
+    if (Kokkos::show_warnings() && (compiled_major != cudaProp.major ||
+                                    compiled_minor != cudaProp.minor)) {
+      std::cerr << "Kokkos::Cuda::initialize WARNING: running kernels compiled "
+                   "for compute capability "
+                << compiled_major << "." << compiled_minor
+                << " on device with compute capability " << cudaProp.major
+                << "." << cudaProp.minor
+                << " , this will likely reduce potential performance."
+                << std::endl;
+    }
+
+    // number of multiprocessors
+
+    m_multiProcCount = cudaProp.multiProcessorCount;
+
+    //----------------------------------
+    // Maximum number of warps per block, capped at WarpSize so that a single
+    // warp can perform the final reduction (one thread per contributing warp).
+
+    m_maxWarpCount = cudaProp.maxThreadsPerBlock / Impl::CudaTraits::WarpSize;
+
+    if (Impl::CudaTraits::WarpSize < m_maxWarpCount) {
+      m_maxWarpCount = Impl::CudaTraits::WarpSize;
+    }
+
+    m_maxSharedWords = cudaProp.sharedMemPerBlock / WordSize;
+
+    //----------------------------------
+    // Maximum number of blocks:
+
+    m_maxBlock[0] = cudaProp.maxGridSize[0];
+    m_maxBlock[1] = cudaProp.maxGridSize[1];
+    m_maxBlock[2] = cudaProp.maxGridSize[2];
+
+    m_shmemPerSM       = cudaProp.sharedMemPerMultiprocessor;
+    m_maxShmemPerBlock = cudaProp.sharedMemPerBlock;
+    m_regsPerSM        = cudaProp.regsPerMultiprocessor;
+    m_maxBlocksPerSM =
+        m_cudaArch < 500
+            ? 16
+            : (m_cudaArch < 750 ? 32 : (m_cudaArch == 750 ? 16 : 32));
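+    // (editorial annotation of the ternary above: 16 resident blocks per SM
+    // on Kepler (< sm_50) and Turing (sm_75), 32 on Maxwell through Volta
+    // and on sm_80 or newer)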
+    m_maxThreadsPerSM    = cudaProp.maxThreadsPerMultiProcessor;
+    m_maxThreadsPerBlock = cudaProp.maxThreadsPerBlock;
+
+    //----------------------------------
+
+    m_scratchUnifiedSupported = cudaProp.unifiedAddressing;
+
+    if (Kokkos::show_warnings() && !m_scratchUnifiedSupported) {
+      std::cerr << "Kokkos::Cuda device " << cudaProp.name << " capability "
+                << cudaProp.major << "." << cudaProp.minor
+                << " does not support unified virtual address space"
+                << std::endl;
+    }
+
+    //----------------------------------
+    // Multiblock reduction uses scratch flags for counters
+    // and scratch space for partial reduction values.
+    // Allocate some initial space.  This will grow as needed.
+
+    {
+      const unsigned reduce_block_count =
+          m_maxWarpCount * Impl::CudaTraits::WarpSize;
+
+      (void)scratch_unified(16 * sizeof(size_type));
+      (void)scratch_flags(reduce_block_count * 2 * sizeof(size_type));
+      (void)scratch_space(reduce_block_count * 16 * sizeof(size_type));
+    }
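+    // Editorial note: with m_maxWarpCount capped at WarpSize (32) above,
+    // reduce_block_count is at most 1024, so this initial reservation is at
+    // most 64 bytes of unified scratch, 8 KiB of flags, and 64 KiB of space
+    // for a 4-byte size_type; scratch_flags()/scratch_space() grow the
+    // buffers on demand later.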
+    //----------------------------------
+    // Concurrent bitset for obtaining unique tokens from within
+    // an executing kernel.
+    {
+      m_maxConcurrency = m_maxThreadsPerSM * cudaProp.multiProcessorCount;
+
+      const int32_t buffer_bound =
+          Kokkos::Impl::concurrent_bitset::buffer_bound(m_maxConcurrency);
+
+      // Allocate and initialize uint32_t[ buffer_bound ]
+
+      using Record =
+          Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>;
+
+      Record *const r =
+          Record::allocate(Kokkos::CudaSpace(), "Kokkos::InternalScratchBitset",
+                           sizeof(uint32_t) * buffer_bound);
+
+      Record::increment(r);
+    }
+    //----------------------------------
+
+  } else {
+    std::ostringstream msg;
+    msg << "Kokkos::Cuda::initialize(" << cuda_device_id << ") FAILED";
+
+    if (!ok_init) {
+      msg << " : Already initialized";
+    }
+    if (!ok_id) {
+      msg << " : Device identifier out of range "
+          << "[0.." << dev_info.m_cudaDevCount << "]";
+    } else if (!ok_dev) {
+      msg << " : Device ";
+      msg << dev_info.m_cudaProp[cuda_device_id].major;
+      msg << ".";
+      msg << dev_info.m_cudaProp[cuda_device_id].minor;
+      msg << " has insufficient capability, required 3.0 or better";
+    }
+    Kokkos::Impl::throw_runtime_exception(msg.str());
+  }
+
+#ifdef KOKKOS_ENABLE_CUDA_UVM
+  const char *env_force_device_alloc =
+      getenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC");
+  bool force_device_alloc;
+  if (env_force_device_alloc == nullptr)
+    force_device_alloc = false;
+  else
+    force_device_alloc = std::stoi(env_force_device_alloc) != 0;
+
+  const char *env_visible_devices = getenv("CUDA_VISIBLE_DEVICES");
+  bool visible_devices_one        = true;
+  if (env_visible_devices == nullptr) visible_devices_one = false;
+
+  if (Kokkos::show_warnings() &&
+      (!visible_devices_one && !force_device_alloc)) {
+    std::cerr << R"warning(
+Kokkos::Cuda::initialize WARNING: Cuda is allocating into UVMSpace by default
+                                  without setting CUDA_MANAGED_FORCE_DEVICE_ALLOC=1 or
+                                  setting CUDA_VISIBLE_DEVICES.
+                                  On multi-GPU systems this could lead to
+                                  severe performance penalties.)warning"
+              << std::endl;
+  }
+#endif
+
+#ifdef KOKKOS_ENABLE_PRE_CUDA_10_DEPRECATION_API
+  cudaThreadSetCacheConfig(cudaFuncCachePreferShared);
+#else
+  cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
+#endif
+
+  // Initialize the lock array used for arbitrarily sized atomics
+  if (stream == nullptr) Impl::initialize_host_cuda_lock_arrays();
+
+  // Allocate a staging buffer for constant memory in pinned host memory
+  // and an event used to avoid overwriting the staged driver while previous
+  // kernel launches may still be reading it
+  if (stream == nullptr) {
+    KOKKOS_IMPL_CUDA_SAFE_CALL(
+        cudaMallocHost(reinterpret_cast<void **>(&constantMemHostStaging),
+                       CudaTraits::ConstantMemoryUsage));
+
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaEventCreate(&constantMemReusable));
+  }
+
+  m_stream        = stream;
+  m_manage_stream = manage_stream;
+  for (int i = 0; i < m_n_team_scratch; ++i) {
+    m_team_scratch_current_size[i] = 0;
+    m_team_scratch_ptr[i]          = nullptr;
+  }
+
+  KOKKOS_IMPL_CUDA_SAFE_CALL(
+      cudaMalloc(&m_scratch_locks, sizeof(int32_t) * m_maxConcurrency));
+  KOKKOS_IMPL_CUDA_SAFE_CALL(
+      cudaMemset(m_scratch_locks, 0, sizeof(int32_t) * m_maxConcurrency));
+}
+
+//----------------------------------------------------------------------------
+
+using ScratchGrain = Cuda::size_type[Impl::CudaTraits::WarpSize];
+enum { sizeScratchGrain = sizeof(ScratchGrain) };
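+
+// Editorial note: the scratch allocators below size their buffers in whole
+// "grains" of one size_type word per warp lane, i.e. sizeScratchGrain ==
+// 32 * sizeof(size_type) == 128 bytes for a 4-byte size_type. Requests are
+// rounded up, so e.g. scratch_flags(130) keeps two grains (256 bytes).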
+
+Cuda::size_type *CudaInternal::scratch_flags(const std::size_t size) const {
+  if (verify_is_initialized("scratch_flags") &&
+      m_scratchFlagsCount * sizeScratchGrain < size) {
+    m_scratchFlagsCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
+
+    using Record =
+        Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>;
+
+    if (m_scratchFlags) Record::decrement(Record::get_record(m_scratchFlags));
+
+    Record *const r =
+        Record::allocate(Kokkos::CudaSpace(), "Kokkos::InternalScratchFlags",
+                         (sizeof(ScratchGrain) * m_scratchFlagsCount));
+
+    Record::increment(r);
+
+    m_scratchFlags = reinterpret_cast<size_type *>(r->data());
+
+    KOKKOS_IMPL_CUDA_SAFE_CALL(
+        cudaMemset(m_scratchFlags, 0, m_scratchFlagsCount * sizeScratchGrain));
+  }
+
+  return m_scratchFlags;
+}
+
+Cuda::size_type *CudaInternal::scratch_space(const std::size_t size) const {
+  if (verify_is_initialized("scratch_space") &&
+      m_scratchSpaceCount * sizeScratchGrain < size) {
+    m_scratchSpaceCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
+
+    using Record =
+        Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>;
+
+    if (m_scratchSpace) Record::decrement(Record::get_record(m_scratchSpace));
+
+    Record *const r =
+        Record::allocate(Kokkos::CudaSpace(), "Kokkos::InternalScratchSpace",
+                         (sizeof(ScratchGrain) * m_scratchSpaceCount));
+
+    Record::increment(r);
+
+    m_scratchSpace = reinterpret_cast<size_type *>(r->data());
+  }
+
+  return m_scratchSpace;
+}
+
+Cuda::size_type *CudaInternal::scratch_unified(const std::size_t size) const {
+  if (verify_is_initialized("scratch_unified") && m_scratchUnifiedSupported &&
+      m_scratchUnifiedCount * sizeScratchGrain < size) {
+    m_scratchUnifiedCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
+
+    using Record =
+        Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>;
+
+    if (m_scratchUnified)
+      Record::decrement(Record::get_record(m_scratchUnified));
+
+    Record *const r = Record::allocate(
+        Kokkos::CudaHostPinnedSpace(), "Kokkos::InternalScratchUnified",
+        (sizeof(ScratchGrain) * m_scratchUnifiedCount));
+
+    Record::increment(r);
+
+    m_scratchUnified = reinterpret_cast<size_type *>(r->data());
+  }
+
+  return m_scratchUnified;
+}
+
+Cuda::size_type *CudaInternal::scratch_functor(const std::size_t size) const {
+  if (verify_is_initialized("scratch_functor") && m_scratchFunctorSize < size) {
+    m_scratchFunctorSize = size;
+
+    using Record =
+        Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>;
+
+    if (m_scratchFunctor)
+      Record::decrement(Record::get_record(m_scratchFunctor));
+
+    Record *const r =
+        Record::allocate(Kokkos::CudaSpace(), "Kokkos::InternalScratchFunctor",
+                         m_scratchFunctorSize);
+
+    Record::increment(r);
+
+    m_scratchFunctor = reinterpret_cast<size_type *>(r->data());
+  }
+
+  return m_scratchFunctor;
+}
+
+std::pair<void *, int> CudaInternal::resize_team_scratch_space(
+    std::int64_t bytes, bool force_shrink) {
+  // Multiple ParallelFor/Reduce Teams can call this function at the same time
+  // and invalidate the m_team_scratch_ptr. We use a pool to avoid any race
+  // condition.
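+  // Acquisition protocol (editorial summary): each m_team_scratch_pool slot
+  // holds 0 when free and 1 when in use; the loop below spins over the
+  // m_n_team_scratch slots until it wins a 0 -> 1 compare-exchange, and the
+  // returned index lets the matching release path elsewhere in the backend
+  // set the slot back to 0.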
+
+  int current_team_scratch = 0;
+  int zero                 = 0;
+  int one                  = 1;
+  while (!m_team_scratch_pool[current_team_scratch].compare_exchange_weak(
+      zero, one, std::memory_order_release, std::memory_order_relaxed)) {
+    // compare_exchange_weak overwrites 'zero' with the observed value on
+    // failure; reset it so the next attempt still expects a free (0) slot.
+    zero                 = 0;
+    current_team_scratch = (current_team_scratch + 1) % m_n_team_scratch;
+  }
+  if (m_team_scratch_current_size[current_team_scratch] == 0) {
+    m_team_scratch_current_size[current_team_scratch] = bytes;
+    m_team_scratch_ptr[current_team_scratch] =
+        Kokkos::kokkos_malloc<Kokkos::CudaSpace>(
+            "Kokkos::CudaSpace::TeamScratchMemory",
+            m_team_scratch_current_size[current_team_scratch]);
+  }
+  if ((bytes > m_team_scratch_current_size[current_team_scratch]) ||
+      ((bytes < m_team_scratch_current_size[current_team_scratch]) &&
+       (force_shrink))) {
+    m_team_scratch_current_size[current_team_scratch] = bytes;
+    m_team_scratch_ptr[current_team_scratch] =
+        Kokkos::kokkos_realloc<Kokkos::CudaSpace>(
+            m_team_scratch_ptr[current_team_scratch],
+            m_team_scratch_current_size[current_team_scratch]);
+  }
+  return std::make_pair(m_team_scratch_ptr[current_team_scratch],
+                        current_team_scratch);
+}
+
+//----------------------------------------------------------------------------
+
+void CudaInternal::finalize() {
+  // skip if finalize() has already been called
+  if (was_finalized) return;
+
+  was_finalized = true;
+
+  // Only finalize this if we're the singleton
+  if (this == &singleton()) {
+    (void)Impl::cuda_global_unique_token_locks(true);
+    Impl::finalize_host_cuda_lock_arrays();
+
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFreeHost(constantMemHostStaging));
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaEventDestroy(constantMemReusable));
+    auto &deep_copy_space =
+        Kokkos::Impl::cuda_get_deep_copy_space(/*initialize*/ false);
+    if (deep_copy_space)
+      deep_copy_space->impl_internal_space_instance()->finalize();
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamDestroy(cuda_get_deep_copy_stream()));
+  }
+
+  if (nullptr != m_scratchSpace || nullptr != m_scratchFlags) {
+    using RecordCuda = Kokkos::Impl::SharedAllocationRecord<CudaSpace>;
+    using RecordHost =
+        Kokkos::Impl::SharedAllocationRecord<CudaHostPinnedSpace>;
+
+    RecordCuda::decrement(RecordCuda::get_record(m_scratchFlags));
+    RecordCuda::decrement(RecordCuda::get_record(m_scratchSpace));
+    RecordHost::decrement(RecordHost::get_record(m_scratchUnified));
+    if (m_scratchFunctorSize > 0)
+      RecordCuda::decrement(RecordCuda::get_record(m_scratchFunctor));
+  }
+
+  for (int i = 0; i < m_n_team_scratch; ++i) {
+    if (m_team_scratch_current_size[i] > 0)
+      Kokkos::kokkos_free<Kokkos::CudaSpace>(m_team_scratch_ptr[i]);
+  }
+
+  if (m_manage_stream && m_stream != nullptr)
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamDestroy(m_stream));
+
+  m_cudaDev             = -1;
+  m_multiProcCount      = 0;
+  m_maxWarpCount        = 0;
+  m_maxBlock            = {0, 0, 0};
+  m_maxSharedWords      = 0;
+  m_scratchSpaceCount   = 0;
+  m_scratchFlagsCount   = 0;
+  m_scratchUnifiedCount = 0;
+  m_streamCount         = 0;
+  m_scratchSpace        = nullptr;
+  m_scratchFlags        = nullptr;
+  m_scratchUnified      = nullptr;
+  m_stream              = nullptr;
+  for (int i = 0; i < m_n_team_scratch; ++i) {
+    m_team_scratch_current_size[i] = 0;
+    m_team_scratch_ptr[i]          = nullptr;
+  }
+
+  KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFree(m_scratch_locks));
+  m_scratch_locks = nullptr;
+}
+
+//----------------------------------------------------------------------------
+
+Cuda::size_type cuda_internal_multiprocessor_count() {
+  return CudaInternal::singleton().m_multiProcCount;
+}
+
+CudaSpace::size_type cuda_internal_maximum_concurrent_block_count() {
+#if defined(KOKKOS_ARCH_KEPLER)
+  // Compute capability 3.0 through 3.7
+  enum : int { max_resident_blocks_per_multiprocessor = 16 };
+#else
+  // Compute capability 5.0 through 6.2
+  enum : int { max_resident_blocks_per_multiprocessor = 32 };
+#endif
+  return CudaInternal::singleton().m_multiProcCount *
+         max_resident_blocks_per_multiprocessor;
+}
+
+Cuda::size_type cuda_internal_maximum_warp_count() {
+  return CudaInternal::singleton().m_maxWarpCount;
+}
+
+std::array<Cuda::size_type, 3> cuda_internal_maximum_grid_count() {
+  return CudaInternal::singleton().m_maxBlock;
+}
+
+Cuda::size_type cuda_internal_maximum_shared_words() {
+  return CudaInternal::singleton().m_maxSharedWords;
+}
+
+Cuda::size_type *cuda_internal_scratch_space(const Cuda &instance,
+                                             const std::size_t size) {
+  return instance.impl_internal_space_instance()->scratch_space(size);
+}
+
+Cuda::size_type *cuda_internal_scratch_flags(const Cuda &instance,
+                                             const std::size_t size) {
+  return instance.impl_internal_space_instance()->scratch_flags(size);
+}
+
+Cuda::size_type *cuda_internal_scratch_unified(const Cuda &instance,
+                                               const std::size_t size) {
+  return instance.impl_internal_space_instance()->scratch_unified(size);
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+Cuda::size_type Cuda::detect_device_count() {
+  return Impl::CudaInternalDevices::singleton().m_cudaDevCount;
+}
+
+int Cuda::concurrency() {
+  return Impl::CudaInternal::singleton().m_maxConcurrency;
+}
+
+int Cuda::impl_is_initialized() {
+  return Impl::CudaInternal::singleton().is_initialized();
+}
+
+void Cuda::impl_initialize(InitializationSettings const &settings) {
+  Impl::CudaInternal::singleton().initialize(Impl::get_gpu(settings));
+
+  // In order to support setting an atexit hook for Kokkos::finalize,
+  // we need to ensure that the Cuda deep_copy instance is not destroyed
+  // before that atexit hook is called.
+  // Thus we create the static instance here, so that it will be deallocated
+  // after the potential atexit call.
+  // This is necessary since we access that instance in Kokkos::finalize.
+  (void)::Kokkos::Impl::cuda_get_deep_copy_space(true);
+}
+
+std::vector<unsigned> Cuda::detect_device_arch() {
+  const Impl::CudaInternalDevices &s = Impl::CudaInternalDevices::singleton();
+
+  std::vector<unsigned> output(s.m_cudaDevCount);
+
+  for (int i = 0; i < s.m_cudaDevCount; ++i) {
+    output[i] = s.m_cudaProp[i].major * 100 + s.m_cudaProp[i].minor;
+  }
+
+  return output;
+}
+
+Cuda::size_type Cuda::device_arch() {
+  const int dev_id = Impl::CudaInternal::singleton().m_cudaDev;
+
+  int dev_arch = 0;
+
+  if (0 <= dev_id) {
+    const struct cudaDeviceProp &cudaProp =
+        Impl::CudaInternalDevices::singleton().m_cudaProp[dev_id];
+
+    dev_arch = cudaProp.major * 100 + cudaProp.minor;
+  }
+
+  return dev_arch;
+}
+
+void Cuda::impl_finalize() { Impl::CudaInternal::singleton().finalize(); }
+
+Cuda::Cuda()
+    : m_space_instance(&Impl::CudaInternal::singleton(),
+                       [](Impl::CudaInternal *) {}) {
+  Impl::CudaInternal::singleton().verify_is_initialized(
+      "Cuda instance constructor");
+}
+
+Cuda::Cuda(cudaStream_t stream, bool manage_stream)
+    : m_space_instance(new Impl::CudaInternal, [](Impl::CudaInternal *ptr) {
+        ptr->finalize();
+        delete ptr;
+      }) {
+  Impl::CudaInternal::singleton().verify_is_initialized(
+      "Cuda instance constructor");
+  m_space_instance->initialize(Impl::CudaInternal::singleton().m_cudaDev,
+                               stream, manage_stream);
+}
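+
+// Hedged usage sketch (editorial, not part of the bundled source): wrapping a
+// user-owned stream in an execution space instance:
+//
+//   cudaStream_t s;
+//   cudaStreamCreate(&s);
+//   Kokkos::Cuda exec(s, /*manage_stream=*/false);
+//
+// With manage_stream == false the caller must destroy the stream, but only
+// after the instance and all work submitted through it have completed.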
+
+void Cuda::print_configuration(std::ostream &os, bool /*verbose*/) const {
+  os << "Device Execution Space:\n";
+  os << "  KOKKOS_ENABLE_CUDA: yes\n";
+
+  os << "Cuda Atomics:\n";
+  os << "  KOKKOS_ENABLE_CUDA_ATOMICS: ";
+#ifdef KOKKOS_ENABLE_CUDA_ATOMICS
+  os << "yes\n";
+#else
+  os << "no\n";
+#endif
+
+  os << "Cuda Options:\n";
+  os << "  KOKKOS_ENABLE_CUDA_LAMBDA: ";
+#ifdef KOKKOS_ENABLE_CUDA_LAMBDA
+  os << "yes\n";
+#else
+  os << "no\n";
+#endif
+  os << "  KOKKOS_ENABLE_CUDA_LDG_INTRINSIC: ";
+#ifdef KOKKOS_ENABLE_CUDA_LDG_INTRINSIC
+  os << "yes\n";
+#else
+  os << "no\n";
+#endif
+  os << "  KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE: ";
+#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
+  os << "yes\n";
+#else
+  os << "no\n";
+#endif
+  os << "  KOKKOS_ENABLE_CUDA_UVM: ";
+#ifdef KOKKOS_ENABLE_CUDA_UVM
+  os << "yes\n";
+#else
+  os << "no\n";
+#endif
+  os << "  KOKKOS_ENABLE_CXX11_DISPATCH_LAMBDA: ";
+#ifdef KOKKOS_ENABLE_CXX11_DISPATCH_LAMBDA
+  os << "yes\n";
+#else
+  os << "no\n";
+#endif
+
+  os << "\nCuda Runtime Configuration:\n";
+
+  m_space_instance->print_configuration(os);
+}
+
+void Cuda::impl_static_fence(const std::string &name) {
+  Kokkos::Impl::cuda_device_synchronize(name);
+}
+
+void Cuda::fence(const std::string &name) const {
+  m_space_instance->fence(name);
+}
+
+const char *Cuda::name() { return "Cuda"; }
+uint32_t Cuda::impl_instance_id() const noexcept {
+  return m_space_instance->impl_get_instance_id();
+}
+
+cudaStream_t Cuda::cuda_stream() const { return m_space_instance->m_stream; }
+int Cuda::cuda_device() const { return m_space_instance->m_cudaDev; }
+const cudaDeviceProp &Cuda::cuda_device_prop() const {
+  return m_space_instance->m_deviceProp;
+}
+
+namespace Impl {
+
+int g_cuda_space_factory_initialized =
+    initialize_space_factory<Cuda>("150_Cuda");
+
+}  // namespace Impl
+
+#ifdef KOKKOS_ENABLE_CXX14
+namespace Tools {
+namespace Experimental {
+constexpr DeviceType DeviceTypeTraits<Cuda>::id;
+}
+}  // namespace Tools
+#endif
+
+}  // namespace Kokkos
+
+#else
+
+void KOKKOS_CORE_SRC_CUDA_IMPL_PREVENT_LINK_ERROR() {}
+
+#endif  // KOKKOS_ENABLE_CUDA
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Instance.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Instance.hpp
new file mode 100644 (file)
index 0000000..62b1f09
--- /dev/null
@@ -0,0 +1,251 @@
+#ifndef KOKKOS_CUDA_INSTANCE_HPP_
+#define KOKKOS_CUDA_INSTANCE_HPP_
+
+#include <vector>
+#include <impl/Kokkos_Tools.hpp>
+#include <atomic>
+#include <Cuda/Kokkos_Cuda_Error.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+// These functions exist to allow working around a suspected system software
+// issue, or to check for race conditions.
+// They are not currently an officially supported capability.
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+extern "C" void kokkos_impl_cuda_set_serial_execution(bool);
+extern "C" bool kokkos_impl_cuda_use_serial_execution();
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+struct CudaTraits {
+  static constexpr CudaSpace::size_type WarpSize = 32 /* 0x0020 */;
+  static constexpr CudaSpace::size_type WarpIndexMask =
+      0x001f; /* Mask for warp index */
+  static constexpr CudaSpace::size_type WarpIndexShift =
+      5; /* WarpSize == 1 << WarpIndexShift */
+
+  static constexpr CudaSpace::size_type ConstantMemoryUsage =
+      0x008000; /* 32k bytes */
+  static constexpr CudaSpace::size_type ConstantMemoryCache =
+      0x002000; /*  8k bytes */
+  static constexpr CudaSpace::size_type KernelArgumentLimit =
+      0x001000; /*  4k bytes */
+  static constexpr CudaSpace::size_type MaxHierarchicalParallelism =
+      1024; /* team_size * vector_length */
+  using ConstantGlobalBufferType =
+      unsigned long[ConstantMemoryUsage / sizeof(unsigned long)];
+
+  static constexpr int ConstantMemoryUseThreshold = 0x000200 /* 512 bytes */;
+
+  KOKKOS_INLINE_FUNCTION static CudaSpace::size_type warp_count(
+      CudaSpace::size_type i) {
+    return (i + WarpIndexMask) >> WarpIndexShift;
+  }
+
+  KOKKOS_INLINE_FUNCTION static CudaSpace::size_type warp_align(
+      CudaSpace::size_type i) {
+    constexpr CudaSpace::size_type Mask = ~WarpIndexMask;
+    return (i + WarpIndexMask) & Mask;
+  }
+};
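+
+// Worked example (editorial): warp_count rounds a thread count up to whole
+// warps and warp_align rounds it up to a multiple of WarpSize, e.g.
+//   CudaTraits::warp_count(33) == 2   // 33 threads occupy two 32-thread warps
+//   CudaTraits::warp_align(33) == 64  // next multiple of the warp size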
+
+//----------------------------------------------------------------------------
+
+CudaSpace::size_type cuda_internal_multiprocessor_count();
+CudaSpace::size_type cuda_internal_maximum_warp_count();
+std::array<CudaSpace::size_type, 3> cuda_internal_maximum_grid_count();
+CudaSpace::size_type cuda_internal_maximum_shared_words();
+
+CudaSpace::size_type cuda_internal_maximum_concurrent_block_count();
+
+CudaSpace::size_type* cuda_internal_scratch_flags(const Cuda&,
+                                                  const std::size_t size);
+CudaSpace::size_type* cuda_internal_scratch_space(const Cuda&,
+                                                  const std::size_t size);
+CudaSpace::size_type* cuda_internal_scratch_unified(const Cuda&,
+                                                    const std::size_t size);
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+namespace Kokkos {
+namespace Impl {
+
+class CudaInternal {
+ private:
+  CudaInternal(const CudaInternal&);
+  CudaInternal& operator=(const CudaInternal&);
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+  static bool kokkos_impl_cuda_use_serial_execution_v;
+#endif
+
+ public:
+  using size_type = Cuda::size_type;
+
+  int m_cudaDev;
+
+  // Device Properties
+  int m_cudaArch;
+  unsigned m_multiProcCount;
+  unsigned m_maxWarpCount;
+  std::array<size_type, 3> m_maxBlock;
+  unsigned m_maxSharedWords;
+  uint32_t m_maxConcurrency;
+  int m_shmemPerSM;
+  int m_maxShmemPerBlock;
+  int m_regsPerSM;
+  int m_maxBlocksPerSM;
+  int m_maxThreadsPerSM;
+  int m_maxThreadsPerBlock;
+
+  cudaDeviceProp m_deviceProp;
+
+  // Scratch Spaces for Reductions
+  mutable std::size_t m_scratchSpaceCount;
+  mutable std::size_t m_scratchFlagsCount;
+  mutable std::size_t m_scratchUnifiedCount;
+  mutable std::size_t m_scratchFunctorSize;
+
+  size_type m_scratchUnifiedSupported;
+  size_type m_streamCount;
+  mutable size_type* m_scratchSpace;
+  mutable size_type* m_scratchFlags;
+  mutable size_type* m_scratchUnified;
+  mutable size_type* m_scratchFunctor;
+  cudaStream_t m_stream;
+  uint32_t m_instance_id;
+  bool m_manage_stream;
+
+  // Team Scratch Level 1 Space
+  int m_n_team_scratch = 10;
+  mutable int64_t m_team_scratch_current_size[10];
+  mutable void* m_team_scratch_ptr[10];
+  mutable std::atomic_int m_team_scratch_pool[10];
+  std::int32_t* m_scratch_locks;
+
+  bool was_initialized = false;
+  bool was_finalized   = false;
+
+  // FIXME_CUDA: these want to be per-device, not per-stream...  use of 'static'
+  //  here will break once there are multiple devices though
+  static unsigned long* constantMemHostStaging;
+  static cudaEvent_t constantMemReusable;
+  static std::mutex constantMemMutex;
+
+  static CudaInternal& singleton();
+
+  int verify_is_initialized(const char* const label) const;
+
+  int is_initialized() const {
+    return nullptr != m_scratchSpace && nullptr != m_scratchFlags;
+  }
+
+  void initialize(int cuda_device_id, cudaStream_t stream = nullptr,
+                  bool manage_stream = false);
+  void finalize();
+
+  void print_configuration(std::ostream&) const;
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+  static bool cuda_use_serial_execution();
+  static void cuda_set_serial_execution(bool);
+#endif
+
+  void fence(const std::string&) const;
+  void fence() const;
+
+  ~CudaInternal();
+
+  CudaInternal()
+      : m_cudaDev(-1),
+        m_cudaArch(-1),
+        m_multiProcCount(0),
+        m_maxWarpCount(0),
+        m_maxBlock({0, 0, 0}),
+        m_maxSharedWords(0),
+        m_maxConcurrency(0),
+        m_shmemPerSM(0),
+        m_maxShmemPerBlock(0),
+        m_regsPerSM(0),
+        m_maxBlocksPerSM(0),
+        m_maxThreadsPerSM(0),
+        m_maxThreadsPerBlock(0),
+        m_scratchSpaceCount(0),
+        m_scratchFlagsCount(0),
+        m_scratchUnifiedCount(0),
+        m_scratchFunctorSize(0),
+        m_scratchUnifiedSupported(0),
+        m_streamCount(0),
+        m_scratchSpace(nullptr),
+        m_scratchFlags(nullptr),
+        m_scratchUnified(nullptr),
+        m_scratchFunctor(nullptr),
+        m_stream(nullptr),
+        m_instance_id(
+            Kokkos::Tools::Experimental::Impl::idForInstance<Kokkos::Cuda>(
+                reinterpret_cast<uintptr_t>(this))) {
+    for (int i = 0; i < m_n_team_scratch; ++i) {
+      m_team_scratch_current_size[i] = 0;
+      m_team_scratch_ptr[i]          = nullptr;
+      m_team_scratch_pool[i]         = 0;
+    }
+  }
+
+  // Resizing of reduction related scratch spaces
+  size_type* scratch_space(const std::size_t size) const;
+  size_type* scratch_flags(const std::size_t size) const;
+  size_type* scratch_unified(const std::size_t size) const;
+  size_type* scratch_functor(const std::size_t size) const;
+  uint32_t impl_get_instance_id() const;
+  // Resizing of team level 1 scratch
+  std::pair<void*, int> resize_team_scratch_space(std::int64_t bytes,
+                                                  bool force_shrink = false);
+};
+
+}  // namespace Impl
+
+namespace Experimental {
+// Partitioning an Execution Space: expects the space plus arithmetic
+// arguments giving relative weights.
+//   Customization point for backends;
+//   the default behavior is to return the passed-in instance.
+
+namespace Impl {
+inline void create_Cuda_instances(std::vector<Cuda>& instances) {
+  for (int s = 0; s < int(instances.size()); s++) {
+    cudaStream_t stream;
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaStreamCreate(&stream));
+    instances[s] = Cuda(stream, true);
+  }
+}
+}  // namespace Impl
+
+template <class... Args>
+std::vector<Cuda> partition_space(const Cuda&, Args...) {
+#ifdef __cpp_fold_expressions
+  static_assert(
+      (... && std::is_arithmetic_v<Args>),
+      "Kokkos Error: partitioning arguments must be integers or floats");
+#endif
+  std::vector<Cuda> instances(sizeof...(Args));
+  Impl::create_Cuda_instances(instances);
+  return instances;
+}
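+
+// Hedged usage sketch (editorial): the arithmetic arguments are relative
+// weights accepted for API compatibility, but as create_Cuda_instances above
+// shows, each argument simply yields one new stream-backed instance:
+//
+//   Kokkos::Cuda exec;
+//   auto instances = Kokkos::Experimental::partition_space(exec, 1, 1);
+//   // instances[0] and instances[1] each own a freshly created cudaStream_t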
+
+template <class T>
+std::vector<Cuda> partition_space(const Cuda&, std::vector<T>& weights) {
+  static_assert(
+      std::is_arithmetic<T>::value,
+      "Kokkos Error: partitioning arguments must be integers or floats");
+
+  std::vector<Cuda> instances(weights.size());
+  Impl::create_Cuda_instances(instances);
+  return instances;
+}
+}  // namespace Experimental
+
+}  // namespace Kokkos
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_KernelLaunch.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_KernelLaunch.hpp
new file mode 100644 (file)
index 0000000..88810b6
--- /dev/null
@@ -0,0 +1,724 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDAEXEC_HPP
+#define KOKKOS_CUDAEXEC_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+
+#include <mutex>
+#include <string>
+#include <cstdint>
+#include <cmath>
+#include <Kokkos_Parallel.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <Cuda/Kokkos_Cuda_abort.hpp>
+#include <Cuda/Kokkos_Cuda_Error.hpp>
+#include <Cuda/Kokkos_Cuda_Locks.hpp>
+#include <Cuda/Kokkos_Cuda_Instance.hpp>
+#include <impl/Kokkos_GraphImpl_fwd.hpp>
+#include <Cuda/Kokkos_Cuda_GraphNodeKernel.hpp>
+#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+/** \brief  Access to constant memory on the device */
+#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
+
+__device__ __constant__ extern unsigned long
+    kokkos_impl_cuda_constant_memory_buffer[];
+
+#else
+
+__device__ __constant__ unsigned long kokkos_impl_cuda_constant_memory_buffer
+    [Kokkos::Impl::CudaTraits::ConstantMemoryUsage / sizeof(unsigned long)];
+
+#endif
+
+template <typename T>
+inline __device__ T* kokkos_impl_cuda_shared_memory() {
+  extern __shared__ Kokkos::CudaSpace::size_type sh[];
+  return (T*)sh;
+}
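+
+// Hedged usage sketch (editorial): inside a kernel the dynamic shared-memory
+// window can be viewed as any element type, e.g.
+//
+//   double* partial_sums = kokkos_impl_cuda_shared_memory<double>();
+//
+// The launch must request enough dynamic shared memory, and the caller is
+// responsible for the alignment of the chosen type.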
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+// See section B.17 of Cuda C Programming Guide Version 3.2
+// for discussion of
+//   __launch_bounds__(maxThreadsPerBlock,minBlocksPerMultiprocessor)
+// function qualifier which could be used to improve performance.
+//----------------------------------------------------------------------------
+// Maximize L1 cache and minimize shared memory:
+//   cudaFuncSetCacheConfig(MyKernel, cudaFuncCachePreferL1 );
+// For 2.0 capability: 48 KB L1 and 16 KB shared
+//----------------------------------------------------------------------------
+
+template <class DriverType>
+__global__ static void cuda_parallel_launch_constant_memory() {
+  const DriverType& driver =
+      *((const DriverType*)kokkos_impl_cuda_constant_memory_buffer);
+
+  driver();
+}
+
+template <class DriverType, unsigned int maxTperB, unsigned int minBperSM>
+__global__ __launch_bounds__(
+    maxTperB, minBperSM) static void cuda_parallel_launch_constant_memory() {
+  const DriverType& driver =
+      *((const DriverType*)kokkos_impl_cuda_constant_memory_buffer);
+
+  driver();
+}
+
+template <class DriverType>
+__global__ static void cuda_parallel_launch_local_memory(
+    const DriverType driver) {
+  driver();
+}
+
+template <class DriverType, unsigned int maxTperB, unsigned int minBperSM>
+__global__ __launch_bounds__(
+    maxTperB,
+    minBperSM) static void cuda_parallel_launch_local_memory(const DriverType
+                                                                 driver) {
+  driver();
+}
+
+template <class DriverType>
+__global__ static void cuda_parallel_launch_global_memory(
+    const DriverType* driver) {
+  driver->operator()();
+}
+
+template <class DriverType, unsigned int maxTperB, unsigned int minBperSM>
+__global__ __launch_bounds__(
+    maxTperB,
+    minBperSM) static void cuda_parallel_launch_global_memory(const DriverType*
+                                                                  driver) {
+  driver->operator()();
+}
+
+//==============================================================================
+// <editor-fold desc="Some helper functions for launch code readability"> {{{1
+
+inline bool is_empty_launch(dim3 const& grid, dim3 const& block) {
+  return (grid.x == 0) || ((block.x * block.y * block.z) == 0);
+}
+
+inline void check_shmem_request(CudaInternal const* cuda_instance, int shmem) {
+  if (cuda_instance->m_maxShmemPerBlock < shmem) {
+    Kokkos::Impl::throw_runtime_exception(
+        std::string("CudaParallelLaunch (or graph node creation) FAILED: shared"
+                    " memory request is too large"));
+  }
+}
+
+// This function needs to be templated on DriverType and LaunchBounds
+// so that the static bool is unique for each type combination;
+// KernelFuncPtr does not necessarily contain that type information.
+template <class DriverType, class LaunchBounds, class KernelFuncPtr>
+inline void configure_shmem_preference(KernelFuncPtr const& func,
+                                       bool prefer_shmem) {
+#ifndef KOKKOS_ARCH_KEPLER
+  // On Kepler the L1 has no benefit since it doesn't cache reads
+  auto set_cache_config = [&] {
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaFuncSetCacheConfig(
+        func,
+        (prefer_shmem ? cudaFuncCachePreferShared : cudaFuncCachePreferL1)));
+    return prefer_shmem;
+  };
+  static bool cache_config_preference_cached = set_cache_config();
+  if (cache_config_preference_cached != prefer_shmem) {
+    cache_config_preference_cached = set_cache_config();
+  }
+#else
+  // Use the parameters so we don't get a warning
+  (void)func;
+  (void)prefer_shmem;
+#endif
+}
+
+template <class Policy>
+std::enable_if_t<Policy::experimental_contains_desired_occupancy>
+modify_launch_configuration_if_desired_occupancy_is_specified(
+    Policy const& policy, cudaDeviceProp const& properties,
+    cudaFuncAttributes const& attributes, dim3 const& block, int& shmem,
+    bool& prefer_shmem) {
+  int const block_size        = block.x * block.y * block.z;
+  int const desired_occupancy = policy.impl_get_desired_occupancy().value();
+
+  size_t const shmem_per_sm_prefer_l1 = get_shmem_per_sm_prefer_l1(properties);
+  size_t const static_shmem           = attributes.sharedSizeBytes;
+
+  // round to nearest integer and avoid division by zero
+  int active_blocks = std::max(
+      1, static_cast<int>(std::round(
+             static_cast<double>(properties.maxThreadsPerMultiProcessor) /
+             block_size * desired_occupancy / 100)));
+  int const dynamic_shmem =
+      shmem_per_sm_prefer_l1 / active_blocks - static_shmem;
+
+  if (dynamic_shmem > shmem) {
+    shmem        = dynamic_shmem;
+    prefer_shmem = false;
+  }
+}
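+
+// Worked example (editorial): with properties.maxThreadsPerMultiProcessor ==
+// 2048, a 256-thread block, and a desired occupancy of 50%, active_blocks ==
+// round(2048 / 256 * 50 / 100) == 4; assuming 64 KiB of prefer-L1 shared
+// memory per SM and no static shared memory, shmem is padded to 16 KiB so
+// that at most four blocks stay resident per SM.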
+
+template <class Policy>
+std::enable_if_t<!Policy::experimental_contains_desired_occupancy>
+modify_launch_configuration_if_desired_occupancy_is_specified(
+    Policy const&, cudaDeviceProp const&, cudaFuncAttributes const&,
+    dim3 const& /*block*/, int& /*shmem*/, bool& /*prefer_shmem*/) {}
+
+// </editor-fold> end Some helper functions for launch code readability }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="DeduceCudaLaunchMechanism"> {{{2
+
+// Use local memory up to ConstantMemoryUseThreshold
+// Use global memory above ConstantMemoryUsage
+// In between use ConstantMemory
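+//
+// In concrete numbers (editorial, from CudaTraits above): a functor smaller
+// than ConstantMemoryUseThreshold (512 bytes) is passed by value as a kernel
+// argument in local memory, one between 512 bytes and ConstantMemoryUsage
+// (32 KiB) is staged through __constant__ memory, and anything larger is
+// copied to global memory and launched by pointer.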
+
+template <class DriverType>
+struct DeduceCudaLaunchMechanism {
+  constexpr static const Kokkos::Experimental::WorkItemProperty::
+      HintLightWeight_t light_weight =
+          Kokkos::Experimental::WorkItemProperty::HintLightWeight;
+  constexpr static const Kokkos::Experimental::WorkItemProperty::
+      HintHeavyWeight_t heavy_weight =
+          Kokkos::Experimental::WorkItemProperty::HintHeavyWeight;
+  constexpr static const typename DriverType::Policy::work_item_property
+      property = typename DriverType::Policy::work_item_property();
+
+  static constexpr const Experimental::CudaLaunchMechanism
+      valid_launch_mechanism =
+          // BuildValidMask
+      (sizeof(DriverType) < CudaTraits::KernelArgumentLimit
+           ? Experimental::CudaLaunchMechanism::LocalMemory
+           : Experimental::CudaLaunchMechanism::Default) |
+      (sizeof(DriverType) < CudaTraits::ConstantMemoryUsage
+           ? Experimental::CudaLaunchMechanism::ConstantMemory
+           : Experimental::CudaLaunchMechanism::Default) |
+      Experimental::CudaLaunchMechanism::GlobalMemory;
+
+  static constexpr const Experimental::CudaLaunchMechanism
+      requested_launch_mechanism =
+          (((property & light_weight) == light_weight)
+               ? Experimental::CudaLaunchMechanism::LocalMemory
+               : Experimental::CudaLaunchMechanism::ConstantMemory) |
+          Experimental::CudaLaunchMechanism::GlobalMemory;
+
+  static constexpr const Experimental::CudaLaunchMechanism
+      default_launch_mechanism =
+          // BuildValidMask
+      (sizeof(DriverType) < CudaTraits::ConstantMemoryUseThreshold)
+          ? Experimental::CudaLaunchMechanism::LocalMemory
+          : ((sizeof(DriverType) < CudaTraits::ConstantMemoryUsage)
+                 ? Experimental::CudaLaunchMechanism::ConstantMemory
+                 : Experimental::CudaLaunchMechanism::GlobalMemory);
+
+  //              None                LightWeight    HeavyWeight
+  // F<UseT       LCG LCG L  L        LCG  LG L  L    LCG  CG L  C
+  // UseT<F<KAL   LCG LCG C  C        LCG  LG C  L    LCG  CG C  C
+  // Kal<F<CMU     CG LCG C  C         CG  LG C  G     CG  CG C  C
+  // CMU<F          G LCG G  G          G  LG G  G      G  CG G  G
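+  //
+  // (Editorial legend: F = sizeof(DriverType); UseT, KAL, CMU =
+  //  CudaTraits::ConstantMemoryUseThreshold, KernelArgumentLimit and
+  //  ConstantMemoryUsage; L, C, G = the LocalMemory, ConstantMemory and
+  //  GlobalMemory launch mechanisms.)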
+  static constexpr const Experimental::CudaLaunchMechanism launch_mechanism =
+      ((property & light_weight) == light_weight)
+          ? (sizeof(DriverType) < CudaTraits::KernelArgumentLimit
+                 ? Experimental::CudaLaunchMechanism::LocalMemory
+                 : Experimental::CudaLaunchMechanism::GlobalMemory)
+          : (((property & heavy_weight) == heavy_weight)
+                 ? (sizeof(DriverType) < CudaTraits::ConstantMemoryUsage
+                        ? Experimental::CudaLaunchMechanism::ConstantMemory
+                        : Experimental::CudaLaunchMechanism::GlobalMemory)
+                 : (default_launch_mechanism));
+};
+
+// </editor-fold> end DeduceCudaLaunchMechanism }}}2
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="CudaParallelLaunchKernelInvoker"> {{{1
+
+// Base classes that summarize the differences between the different launch
+// mechanisms
+
+template <class DriverType, class LaunchBounds,
+          Experimental::CudaLaunchMechanism LaunchMechanism>
+struct CudaParallelLaunchKernelFunc;
+
+template <class DriverType, class LaunchBounds,
+          Experimental::CudaLaunchMechanism LaunchMechanism>
+struct CudaParallelLaunchKernelInvoker;
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="Local memory"> {{{2
+
+template <class DriverType, unsigned int MaxThreadsPerBlock,
+          unsigned int MinBlocksPerSM>
+struct CudaParallelLaunchKernelFunc<
+    DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+    Experimental::CudaLaunchMechanism::LocalMemory> {
+  static std::decay_t<decltype(cuda_parallel_launch_local_memory<
+                               DriverType, MaxThreadsPerBlock, MinBlocksPerSM>)>
+  get_kernel_func() {
+    return cuda_parallel_launch_local_memory<DriverType, MaxThreadsPerBlock,
+                                             MinBlocksPerSM>;
+  }
+};
+
+template <class DriverType>
+struct CudaParallelLaunchKernelFunc<
+    DriverType, Kokkos::LaunchBounds<0, 0>,
+    Experimental::CudaLaunchMechanism::LocalMemory> {
+  static std::decay_t<decltype(cuda_parallel_launch_local_memory<DriverType>)>
+  get_kernel_func() {
+    return cuda_parallel_launch_local_memory<DriverType>;
+  }
+};
+
+//------------------------------------------------------------------------------
+
+template <class DriverType, class LaunchBounds>
+struct CudaParallelLaunchKernelInvoker<
+    DriverType, LaunchBounds, Experimental::CudaLaunchMechanism::LocalMemory>
+    : CudaParallelLaunchKernelFunc<
+          DriverType, LaunchBounds,
+          Experimental::CudaLaunchMechanism::LocalMemory> {
+  using base_t = CudaParallelLaunchKernelFunc<
+      DriverType, LaunchBounds, Experimental::CudaLaunchMechanism::LocalMemory>;
+  static_assert(sizeof(DriverType) < CudaTraits::KernelArgumentLimit,
+                "Kokkos Error: Requested CudaLaunchLocalMemory with a Functor "
+                "larger than 4096 bytes.");
+
+  static void invoke_kernel(DriverType const& driver, dim3 const& grid,
+                            dim3 const& block, int shmem,
+                            CudaInternal const* cuda_instance) {
+    (base_t::
+         get_kernel_func())<<<grid, block, shmem, cuda_instance->m_stream>>>(
+        driver);
+  }
+
+#ifdef KOKKOS_CUDA_ENABLE_GRAPHS
+  inline static void create_parallel_launch_graph_node(
+      DriverType const& driver, dim3 const& grid, dim3 const& block, int shmem,
+      CudaInternal const* cuda_instance, bool prefer_shmem) {
+    //----------------------------------------
+    auto const& graph = Impl::get_cuda_graph_from_kernel(driver);
+    KOKKOS_EXPECTS(bool(graph));
+    auto& graph_node = Impl::get_cuda_graph_node_from_kernel(driver);
+    // Expect node not yet initialized
+    KOKKOS_EXPECTS(!bool(graph_node));
+
+    if (!Impl::is_empty_launch(grid, block)) {
+      Impl::check_shmem_request(cuda_instance, shmem);
+      Impl::configure_shmem_preference<DriverType, LaunchBounds>(
+          base_t::get_kernel_func(), prefer_shmem);
+
+      void const* args[] = {&driver};
+
+      cudaKernelNodeParams params = {};
+
+      params.blockDim       = block;
+      params.gridDim        = grid;
+      params.sharedMemBytes = shmem;
+      params.func           = (void*)base_t::get_kernel_func();
+      params.kernelParams   = (void**)args;
+      params.extra          = nullptr;
+
+      KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGraphAddKernelNode(
+          &graph_node, graph, /* dependencies = */ nullptr,
+          /* numDependencies = */ 0, &params));
+    } else {
+      // We still need an empty node for the dependency structure
+      KOKKOS_IMPL_CUDA_SAFE_CALL(
+          cudaGraphAddEmptyNode(&graph_node, graph,
+                                /* dependencies = */ nullptr,
+                                /* numDependencies = */ 0));
+    }
+    KOKKOS_ENSURES(bool(graph_node))
+  }
+#endif
+};
+
+// </editor-fold> end local memory }}}2
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="Global Memory"> {{{2
+
+template <class DriverType, unsigned int MaxThreadsPerBlock,
+          unsigned int MinBlocksPerSM>
+struct CudaParallelLaunchKernelFunc<
+    DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+    Experimental::CudaLaunchMechanism::GlobalMemory> {
+  static void* get_kernel_func() {
+    return cuda_parallel_launch_global_memory<DriverType, MaxThreadsPerBlock,
+                                              MinBlocksPerSM>;
+  }
+};
+
+template <class DriverType>
+struct CudaParallelLaunchKernelFunc<
+    DriverType, Kokkos::LaunchBounds<0, 0>,
+    Experimental::CudaLaunchMechanism::GlobalMemory> {
+  static std::decay_t<decltype(cuda_parallel_launch_global_memory<DriverType>)>
+  get_kernel_func() {
+    return cuda_parallel_launch_global_memory<DriverType>;
+  }
+};
+
+//------------------------------------------------------------------------------
+
+template <class DriverType, class LaunchBounds>
+struct CudaParallelLaunchKernelInvoker<
+    DriverType, LaunchBounds, Experimental::CudaLaunchMechanism::GlobalMemory>
+    : CudaParallelLaunchKernelFunc<
+          DriverType, LaunchBounds,
+          Experimental::CudaLaunchMechanism::GlobalMemory> {
+  using base_t = CudaParallelLaunchKernelFunc<
+      DriverType, LaunchBounds,
+      Experimental::CudaLaunchMechanism::GlobalMemory>;
+
+  static void invoke_kernel(DriverType const& driver, dim3 const& grid,
+                            dim3 const& block, int shmem,
+                            CudaInternal const* cuda_instance) {
+    DriverType* driver_ptr = reinterpret_cast<DriverType*>(
+        cuda_instance->scratch_functor(sizeof(DriverType)));
+
+    cudaMemcpyAsync(driver_ptr, &driver, sizeof(DriverType), cudaMemcpyDefault,
+                    cuda_instance->m_stream);
+    (base_t::
+         get_kernel_func())<<<grid, block, shmem, cuda_instance->m_stream>>>(
+        driver_ptr);
+  }
+
+#ifdef KOKKOS_CUDA_ENABLE_GRAPHS
+  inline static void create_parallel_launch_graph_node(
+      DriverType const& driver, dim3 const& grid, dim3 const& block, int shmem,
+      CudaInternal const* cuda_instance, bool prefer_shmem) {
+    //----------------------------------------
+    auto const& graph = Impl::get_cuda_graph_from_kernel(driver);
+    KOKKOS_EXPECTS(bool(graph));
+    auto& graph_node = Impl::get_cuda_graph_node_from_kernel(driver);
+    // Expect node not yet initialized
+    KOKKOS_EXPECTS(!bool(graph_node));
+
+    if (!Impl::is_empty_launch(grid, block)) {
+      Impl::check_shmem_request(cuda_instance, shmem);
+      Impl::configure_shmem_preference<DriverType, LaunchBounds>(
+          base_t::get_kernel_func(), prefer_shmem);
+
+      auto* driver_ptr = Impl::allocate_driver_storage_for_kernel(driver);
+
+      // Unlike in the non-graph case, we can get away with doing an async copy
+      // here because the `DriverType` instance is held in the GraphNodeImpl
+      // which is guaranteed to be alive until the graph instance itself is
+      // destroyed, where there should be a fence ensuring that the allocation
+      // associated with this kernel on the device side isn't deleted.
+      cudaMemcpyAsync(driver_ptr, &driver, sizeof(DriverType),
+                      cudaMemcpyDefault, cuda_instance->m_stream);
+
+      void const* args[] = {&driver_ptr};
+
+      cudaKernelNodeParams params = {};
+
+      params.blockDim       = block;
+      params.gridDim        = grid;
+      params.sharedMemBytes = shmem;
+      params.func           = (void*)base_t::get_kernel_func();
+      params.kernelParams   = (void**)args;
+      params.extra          = nullptr;
+
+      KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGraphAddKernelNode(
+          &graph_node, graph, /* dependencies = */ nullptr,
+          /* numDependencies = */ 0, &params));
+    } else {
+      // We still need an empty node for the dependency structure
+      KOKKOS_IMPL_CUDA_SAFE_CALL(
+          cudaGraphAddEmptyNode(&graph_node, graph,
+                                /* dependencies = */ nullptr,
+                                /* numDependencies = */ 0));
+    }
+    KOKKOS_ENSURES(bool(graph_node))
+  }
+#endif
+};
+
+// </editor-fold> end Global Memory }}}2
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="Constant Memory"> {{{2
+
+template <class DriverType, unsigned int MaxThreadsPerBlock,
+          unsigned int MinBlocksPerSM>
+struct CudaParallelLaunchKernelFunc<
+    DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+    Experimental::CudaLaunchMechanism::ConstantMemory> {
+  static std::decay_t<decltype(cuda_parallel_launch_constant_memory<
+                               DriverType, MaxThreadsPerBlock, MinBlocksPerSM>)>
+  get_kernel_func() {
+    return cuda_parallel_launch_constant_memory<DriverType, MaxThreadsPerBlock,
+                                                MinBlocksPerSM>;
+  }
+};
+
+template <class DriverType>
+struct CudaParallelLaunchKernelFunc<
+    DriverType, Kokkos::LaunchBounds<0, 0>,
+    Experimental::CudaLaunchMechanism::ConstantMemory> {
+  static std::decay_t<
+      decltype(cuda_parallel_launch_constant_memory<DriverType>)>
+  get_kernel_func() {
+    return cuda_parallel_launch_constant_memory<DriverType>;
+  }
+};
+
+//------------------------------------------------------------------------------
+
+template <class DriverType, class LaunchBounds>
+struct CudaParallelLaunchKernelInvoker<
+    DriverType, LaunchBounds, Experimental::CudaLaunchMechanism::ConstantMemory>
+    : CudaParallelLaunchKernelFunc<
+          DriverType, LaunchBounds,
+          Experimental::CudaLaunchMechanism::ConstantMemory> {
+  using base_t = CudaParallelLaunchKernelFunc<
+      DriverType, LaunchBounds,
+      Experimental::CudaLaunchMechanism::ConstantMemory>;
+  static_assert(sizeof(DriverType) < CudaTraits::ConstantMemoryUsage,
+                "Kokkos Error: Requested CudaLaunchConstantMemory with a "
+                "Functor larger than 32kB.");
+
+  static void invoke_kernel(DriverType const& driver, dim3 const& grid,
+                            dim3 const& block, int shmem,
+                            CudaInternal const* cuda_instance) {
+    // Wait until the previous kernel that uses the constant buffer is done
+    std::lock_guard<std::mutex> lock(CudaInternal::constantMemMutex);
+    KOKKOS_IMPL_CUDA_SAFE_CALL(
+        cudaEventSynchronize(CudaInternal::constantMemReusable));
+
+    // Copy functor (synchronously) to staging buffer in pinned host memory
+    unsigned long* staging = cuda_instance->constantMemHostStaging;
+    memcpy(staging, &driver, sizeof(DriverType));
+
+    // Copy functor asynchronously from there to constant memory on the device
+    cudaMemcpyToSymbolAsync(kokkos_impl_cuda_constant_memory_buffer, staging,
+                            sizeof(DriverType), 0, cudaMemcpyHostToDevice,
+                            cudaStream_t(cuda_instance->m_stream));
+
+    // Invoke the driver function on the device
+    (base_t::
+         get_kernel_func())<<<grid, block, shmem, cuda_instance->m_stream>>>();
+
+    // Record an event that says when the constant buffer can be reused
+    KOKKOS_IMPL_CUDA_SAFE_CALL(
+        cudaEventRecord(CudaInternal::constantMemReusable,
+                        cudaStream_t(cuda_instance->m_stream)));
+  }
+
+#ifdef KOKKOS_CUDA_ENABLE_GRAPHS
+  inline static void create_parallel_launch_graph_node(
+      DriverType const& driver, dim3 const& grid, dim3 const& block, int shmem,
+      CudaInternal const* cuda_instance, bool prefer_shmem) {
+    // Just use global memory; coordinating through events to share constant
+    // memory with the non-graph interface is not really reasonable since
+    // events don't work with Graphs directly, and this would anyway require
+    // a much more complicated structure that finds previous nodes in the
+    // dependency structure of the graph and creates an implicit dependence
+    // based on the need for constant memory (which we would then have to
+    // somehow go and prove was not creating a dependency cycle, and I don't
+    // even know if there's an efficient way to do that, let alone in the
+    // structure we currently have).
+    using global_launch_impl_t = CudaParallelLaunchKernelInvoker<
+        DriverType, LaunchBounds,
+        Experimental::CudaLaunchMechanism::GlobalMemory>;
+    global_launch_impl_t::create_parallel_launch_graph_node(
+        driver, grid, block, shmem, cuda_instance, prefer_shmem);
+  }
+#endif
+};
+
+// </editor-fold> end Constant Memory }}}2
+//------------------------------------------------------------------------------
+
+// </editor-fold> end CudaParallelLaunchKernelInvoker }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="CudaParallelLaunchImpl"> {{{1
+
+template <class DriverType, class LaunchBounds,
+          Experimental::CudaLaunchMechanism LaunchMechanism>
+struct CudaParallelLaunchImpl;
+
+template <class DriverType, unsigned int MaxThreadsPerBlock,
+          unsigned int MinBlocksPerSM,
+          Experimental::CudaLaunchMechanism LaunchMechanism>
+struct CudaParallelLaunchImpl<
+    DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+    LaunchMechanism>
+    : CudaParallelLaunchKernelInvoker<
+          DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+          LaunchMechanism> {
+  using base_t = CudaParallelLaunchKernelInvoker<
+      DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+      LaunchMechanism>;
+
+  inline static void launch_kernel(const DriverType& driver, const dim3& grid,
+                                   const dim3& block, int shmem,
+                                   const CudaInternal* cuda_instance,
+                                   bool prefer_shmem) {
+    if (!Impl::is_empty_launch(grid, block)) {
+      // Prevent multiple threads from simultaneously setting the cache
+      // configuration preference and launching the same kernel
+      static std::mutex mutex;
+      std::lock_guard<std::mutex> lock(mutex);
+
+      Impl::check_shmem_request(cuda_instance, shmem);
+
+      // If a desired occupancy is specified, we compute how much shared memory
+      // to ask for to achieve that occupancy, assuming that the cache
+      // configuration is `cudaFuncCachePreferL1`.  If the amount of dynamic
+      // shared memory computed is actually smaller than `shmem` we overwrite
+      // `shmem` and set `prefer_shmem` to `false`.
+      modify_launch_configuration_if_desired_occupancy_is_specified(
+          driver.get_policy(), cuda_instance->m_deviceProp,
+          get_cuda_func_attributes(), block, shmem, prefer_shmem);
+
+      Impl::configure_shmem_preference<
+          DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>>(
+          base_t::get_kernel_func(), prefer_shmem);
+
+      ensure_cuda_lock_arrays_on_device();
+
+      // Invoke the driver function on the device
+      base_t::invoke_kernel(driver, grid, block, shmem, cuda_instance);
+
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+      KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetLastError());
+      cuda_instance->fence(
+          "Kokkos::Impl::launch_kernel: Debug Only Check for Execution Error");
+#endif
+    }
+  }
+
+  static cudaFuncAttributes get_cuda_func_attributes() {
+    // A race condition inside cudaFuncGetAttributes when it is given the same
+    // address requires using a local variable as input instead of a static.
+    // Rely on static variable initialization to make sure only one thread
+    // executes the code and that the result is visible.
+    auto wrap_get_attributes = []() -> cudaFuncAttributes {
+      cudaFuncAttributes attr_tmp;
+      KOKKOS_IMPL_CUDA_SAFE_CALL(
+          cudaFuncGetAttributes(&attr_tmp, base_t::get_kernel_func()));
+      return attr_tmp;
+    };
+    static cudaFuncAttributes attr = wrap_get_attributes();
+    return attr;
+  }
+};
+
+// </editor-fold> end CudaParallelLaunchImpl }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="CudaParallelLaunch"> {{{1
+
+template <class DriverType, class LaunchBounds = Kokkos::LaunchBounds<>,
+          Experimental::CudaLaunchMechanism LaunchMechanism =
+              DeduceCudaLaunchMechanism<DriverType>::launch_mechanism,
+          bool DoGraph = DriverType::Policy::is_graph_kernel::value
+#ifndef KOKKOS_CUDA_ENABLE_GRAPHS
+                         && false
+#endif
+          >
+struct CudaParallelLaunch;
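+
+// Note: when KOKKOS_CUDA_ENABLE_GRAPHS is not defined, the trailing
+// "&& false" above forces the DoGraph default to false, so only the
+// non-graph specialization below can ever be selected.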
+
+// General launch mechanism
+template <class DriverType, class LaunchBounds,
+          Experimental::CudaLaunchMechanism LaunchMechanism>
+struct CudaParallelLaunch<DriverType, LaunchBounds, LaunchMechanism,
+                          /* DoGraph = */ false>
+    : CudaParallelLaunchImpl<DriverType, LaunchBounds, LaunchMechanism> {
+  using base_t =
+      CudaParallelLaunchImpl<DriverType, LaunchBounds, LaunchMechanism>;
+  template <class... Args>
+  CudaParallelLaunch(Args&&... args) {
+    base_t::launch_kernel((Args &&) args...);
+  }
+};
+
+#ifdef KOKKOS_CUDA_ENABLE_GRAPHS
+// Launch mechanism for creating graph nodes
+template <class DriverType, class LaunchBounds,
+          Experimental::CudaLaunchMechanism LaunchMechanism>
+struct CudaParallelLaunch<DriverType, LaunchBounds, LaunchMechanism,
+                          /* DoGraph = */ true>
+    : CudaParallelLaunchImpl<DriverType, LaunchBounds, LaunchMechanism> {
+  using base_t =
+      CudaParallelLaunchImpl<DriverType, LaunchBounds, LaunchMechanism>;
+  template <class... Args>
+  CudaParallelLaunch(Args&&... args) {
+    base_t::create_parallel_launch_graph_node((Args &&) args...);
+  }
+};
+#endif
+
+// </editor-fold> end CudaParallelLaunch }}}1
+//==============================================================================
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* defined( KOKKOS_ENABLE_CUDA ) */
+#endif /* #ifndef KOKKOS_CUDAEXEC_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Locks.cpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Locks.cpp
new file mode 100644 (file)
index 0000000..3796534
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+#include <Cuda/Kokkos_Cuda_Locks.hpp>
+#include <Cuda/Kokkos_Cuda_Error.hpp>
+
+#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
+namespace Kokkos {
+namespace Impl {
+__device__ __constant__ CudaLockArrays g_device_cuda_lock_arrays = {nullptr, 0};
+}
+}  // namespace Kokkos
+#endif
+
+namespace Kokkos {
+
+namespace {
+
+__global__ void init_lock_array_kernel_atomic() {
+  unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
+  if (i < CUDA_SPACE_ATOMIC_MASK + 1) {
+    Kokkos::Impl::g_device_cuda_lock_arrays.atomic[i] = 0;
+  }
+}
+
+}  // namespace
+
+namespace Impl {
+
+CudaLockArrays g_host_cuda_lock_arrays = {nullptr, 0};
+
+void initialize_host_cuda_lock_arrays() {
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+  desul::Impl::init_lock_arrays();
+  desul::ensure_cuda_lock_arrays_on_device();
+#endif
+  if (g_host_cuda_lock_arrays.atomic != nullptr) return;
+  KOKKOS_IMPL_CUDA_SAFE_CALL(
+      cudaMalloc(&g_host_cuda_lock_arrays.atomic,
+                 sizeof(int) * (CUDA_SPACE_ATOMIC_MASK + 1)));
+  Impl::cuda_device_synchronize(
+      "Kokkos::Impl::initialize_host_cuda_lock_arrays: Pre Init Lock Arrays");
+  g_host_cuda_lock_arrays.n = Cuda::concurrency();
+  copy_cuda_lock_arrays_to_device();
+  init_lock_array_kernel_atomic<<<(CUDA_SPACE_ATOMIC_MASK + 1 + 255) / 256,
+                                  256>>>();
+  Impl::cuda_device_synchronize(
+      "Kokkos::Impl::initialize_host_cuda_lock_arrays: Post Init Lock Arrays");
+}
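+
+// Note (illustrative arithmetic): CUDA_SPACE_ATOMIC_MASK == 0x1FFFF, so the
+// launch above initializes 0x1FFFF + 1 == 131072 lock entries using
+// (131072 + 255) / 256 == 512 blocks of 256 threads each.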
+
+void finalize_host_cuda_lock_arrays() {
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+  desul::Impl::finalize_lock_arrays();
+#endif
+
+  if (g_host_cuda_lock_arrays.atomic == nullptr) return;
+  cudaFree(g_host_cuda_lock_arrays.atomic);
+  g_host_cuda_lock_arrays.atomic = nullptr;
+  g_host_cuda_lock_arrays.n      = 0;
+#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
+  copy_cuda_lock_arrays_to_device();
+#endif
+}
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+#else
+
+void KOKKOS_CORE_SRC_CUDA_CUDA_LOCKS_PREVENT_LINK_ERROR() {}
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Locks.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Locks.hpp
new file mode 100644 (file)
index 0000000..244f142
--- /dev/null
@@ -0,0 +1,197 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_LOCKS_HPP
+#define KOKKOS_CUDA_LOCKS_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#ifdef KOKKOS_ENABLE_CUDA
+
+#include <cstdint>
+
+#include <Cuda/Kokkos_Cuda_Error.hpp>
+
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+#include <desul/atomics/Lock_Array_Cuda.hpp>
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+struct CudaLockArrays {
+  std::int32_t* atomic;
+  std::int32_t n;
+};
+
+/// \brief This global variable in Host space is the central definition
+///        of these arrays.
+extern CudaLockArrays g_host_cuda_lock_arrays;
+
+/// \brief After this call, the g_host_cuda_lock_arrays variable has
+///        valid, initialized arrays.
+///
+/// This call is idempotent.
+void initialize_host_cuda_lock_arrays();
+
+/// \brief After this call, the g_host_cuda_lock_arrays variable has
+///        all null pointers, and all array memory has been freed.
+///
+/// This call is idempotent.
+void finalize_host_cuda_lock_arrays();
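+
+/// A minimal usage sketch, assuming CUDA has been initialized (illustrative,
+/// not part of the original source):
+///
+///   initialize_host_cuda_lock_arrays();  // allocates + fills device arrays
+///   initialize_host_cuda_lock_arrays();  // safe no-op: call is idempotent
+///   /* ... run kernels that use the lock arrays ... */
+///   finalize_host_cuda_lock_arrays();    // frees arrays, resets pointers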
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+/// \brief This global variable in CUDA space is what kernels use
+///        to get access to the lock arrays.
+///
+/// When relocatable device code is enabled, there is a single
+/// instance of this global variable for the entire executable,
+/// whose definition will be in Kokkos_Cuda_Locks.cpp (and whose declaration
+/// here must then be extern).
+/// This one instance will be initialized by initialize_host_cuda_lock_arrays
+/// and need not be modified afterwards.
+///
+/// When relocatable device code is disabled, an instance of this variable
+/// will be created in every translation unit that sees this header file
+/// (we make this clear by marking it static, meaning no other translation
+///  unit can link to it).
+/// Since the Kokkos_Cuda_Locks.cpp translation unit cannot initialize the
+/// instances in other translation units, we must update this CUDA global
+/// variable based on the Host global variable prior to running any kernels
+/// that will use it.
+/// That is the purpose of the ensure_cuda_lock_arrays_on_device function.
+__device__
+#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
+    __constant__ extern
+#endif
+    CudaLockArrays g_device_cuda_lock_arrays;
+
+#define CUDA_SPACE_ATOMIC_MASK 0x1FFFF
+
+/// \brief Acquire a lock for the address
+///
+/// This function tries to acquire the lock for the hash value derived
+/// from the provided ptr. If the lock is successfully acquired the
+/// function returns true. Otherwise it returns false.
+__device__ inline bool lock_address_cuda_space(void* ptr) {
+  size_t offset = size_t(ptr);
+  offset        = offset >> 2;
+  offset        = offset & CUDA_SPACE_ATOMIC_MASK;
+  return (0 == atomicCAS(&g_device_cuda_lock_arrays.atomic[offset], 0, 1));
+}
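+
+// Worked example (illustrative): for ptr == (void*)0x7008, offset is
+// 0x7008 >> 2 == 0x1C02, and 0x1C02 & CUDA_SPACE_ATOMIC_MASK == 0x1C02,
+// so the address maps to lock slot 0x1C02. Distinct addresses can hash to
+// the same slot, which is safe but may cause extra lock contention.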
+
+/// \brief Release lock for the address
+///
+/// This function releases the lock for the hash value derived
+/// from the provided ptr. This function should only be called
+/// after previously successfully acquiring a lock with
+/// lock_address.
+__device__ inline void unlock_address_cuda_space(void* ptr) {
+  size_t offset = size_t(ptr);
+  offset        = offset >> 2;
+  offset        = offset & CUDA_SPACE_ATOMIC_MASK;
+  atomicExch(&g_device_cuda_lock_arrays.atomic[offset], 0);
+}
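+
+// A minimal device-side usage sketch (illustrative, not part of the
+// original source): spin until the lock for ptr is acquired, perform the
+// critical section, then release.
+//
+//   __device__ void locked_update(int* ptr) {   // hypothetical helper
+//     bool done = false;
+//     while (!done) {
+//       if (lock_address_cuda_space(ptr)) {
+//         *ptr += 1;                             // critical section
+//         unlock_address_cuda_space(ptr);
+//         done = true;
+//       }
+//     }
+//   }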
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+// Give lock_array_copied explicit translation-unit scope
+namespace Kokkos {
+namespace Impl {
+namespace {
+static int lock_array_copied = 0;
+inline int eliminate_warning_for_lock_array() { return lock_array_copied; }
+}  // namespace
+
+#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
+inline
+#else
+static
+#endif
+    void
+    copy_cuda_lock_arrays_to_device() {
+  if (lock_array_copied == 0) {
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemcpyToSymbol(g_device_cuda_lock_arrays,
+                                                  &g_host_cuda_lock_arrays,
+                                                  sizeof(CudaLockArrays)));
+  }
+  lock_array_copied = 1;
+}
+
+#ifndef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+
+#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
+inline void ensure_cuda_lock_arrays_on_device() {}
+#else
+inline static void ensure_cuda_lock_arrays_on_device() {
+  copy_cuda_lock_arrays_to_device();
+}
+#endif
+
+#else
+
+#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
+inline void ensure_cuda_lock_arrays_on_device() {}
+#else
+// Still need to copy the lock arrays to the device for team scratch etc.
+inline static void ensure_cuda_lock_arrays_on_device() {
+  copy_cuda_lock_arrays_to_device();
+  desul::ensure_cuda_lock_arrays_on_device();
+}
+#endif
+
+#endif /* defined( KOKKOS_ENABLE_IMPL_DESUL_ATOMICS ) */
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif /* defined( KOKKOS_ENABLE_CUDA ) */
+
+#endif /* #ifndef KOKKOS_CUDA_LOCKS_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_MDRangePolicy.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_MDRangePolicy.hpp
new file mode 100644 (file)
index 0000000..12b7f70
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef KOKKOS_CUDA_MDRANGEPOLICY_HPP_
+#define KOKKOS_CUDA_MDRANGEPOLICY_HPP_
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+
+template <>
+struct default_outer_direction<Kokkos::Cuda> {
+  using type                     = Iterate;
+  static constexpr Iterate value = Iterate::Left;
+};
+
+template <>
+struct default_inner_direction<Kokkos::Cuda> {
+  using type                     = Iterate;
+  static constexpr Iterate value = Iterate::Left;
+};
+
+namespace Impl {
+
+// Settings for MDRangePolicy
+template <>
+inline TileSizeProperties get_tile_size_properties<Kokkos::Cuda>(
+    const Kokkos::Cuda& space) {
+  TileSizeProperties properties;
+  properties.max_threads =
+      space.impl_internal_space_instance()->m_maxThreadsPerSM;
+  properties.default_largest_tile_size = 16;
+  properties.default_tile_size         = 2;
+  properties.max_total_tile_size       = 512;
+  return properties;
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+#endif
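+
+// Illustrative consequence of these settings (a sketch, not part of this
+// header): an MDRangePolicy on Cuda without explicit tile sizes gets tiles
+// chosen from the properties above (default tile extent 2, largest default
+// extent 16), with the product of all tile extents capped at 512.
+//
+//   Kokkos::MDRangePolicy<Kokkos::Cuda, Kokkos::Rank<2>> policy(
+//       {0, 0}, {M, N});                     // tiles deduced from
+//   Kokkos::parallel_for(policy, functor);   // TileSizeProperties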
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Parallel_MDRange.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Parallel_MDRange.hpp
new file mode 100644 (file)
index 0000000..e586bb4
--- /dev/null
@@ -0,0 +1,477 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_PARALLEL_MD_RANGE_HPP
+#define KOKKOS_CUDA_PARALLEL_MD_RANGE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_CUDA)
+
+#include <algorithm>
+#include <string>
+
+#include <Kokkos_Parallel.hpp>
+
+#include <Cuda/Kokkos_Cuda_KernelLaunch.hpp>
+#include <Cuda/Kokkos_Cuda_ReduceScan.hpp>
+#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
+#include <Kokkos_MinMaxClamp.hpp>
+
+#include <impl/Kokkos_Tools.hpp>
+#include <typeinfo>
+
+#include <KokkosExp_MDRangePolicy.hpp>
+#include <impl/KokkosExp_IterateTileGPU.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>, Kokkos::Cuda> {
+ public:
+  using Policy       = Kokkos::MDRangePolicy<Traits...>;
+  using functor_type = FunctorType;
+
+ private:
+  using RP               = Policy;
+  using array_index_type = typename Policy::array_index_type;
+  using index_type       = typename Policy::index_type;
+  using LaunchBounds     = typename Policy::launch_bounds;
+
+  const FunctorType m_functor;
+  const Policy m_rp;
+
+ public:
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy& pol, const Functor&) {
+    cudaFuncAttributes attr =
+        CudaParallelLaunch<ParallelFor,
+                           LaunchBounds>::get_cuda_func_attributes();
+    auto const& prop = pol.space().cuda_device_prop();
+    // Limits due to registers/SM; MDRange doesn't have
+    // shared memory constraints
+    int const regs_per_sm        = prop.regsPerMultiprocessor;
+    int const regs_per_thread    = attr.numRegs;
+    int const max_threads_per_sm = regs_per_sm / regs_per_thread;
+    return std::min(
+        max_threads_per_sm,
+        static_cast<int>(Kokkos::Impl::CudaTraits::MaxHierarchicalParallelism));
+  }
+  Policy const& get_policy() const { return m_rp; }
+  inline __device__ void operator()() const {
+    Kokkos::Impl::DeviceIterateTile<Policy::rank, Policy, FunctorType,
+                                    typename Policy::work_tag>(m_rp, m_functor)
+        .exec_range();
+  }
+
+  inline void execute() const {
+    if (m_rp.m_num_tiles == 0) return;
+    const auto maxblocks = cuda_internal_maximum_grid_count();
+    if (RP::rank == 2) {
+      const dim3 block(m_rp.m_tile[0], m_rp.m_tile[1], 1);
+      KOKKOS_ASSERT(block.x > 0);
+      KOKKOS_ASSERT(block.y > 0);
+      const dim3 grid(
+          std::min<array_index_type>(
+              (m_rp.m_upper[0] - m_rp.m_lower[0] + block.x - 1) / block.x,
+              maxblocks[0]),
+          std::min<array_index_type>(
+              (m_rp.m_upper[1] - m_rp.m_lower[1] + block.y - 1) / block.y,
+              maxblocks[1]),
+          1);
+      CudaParallelLaunch<ParallelFor, LaunchBounds>(
+          *this, grid, block, 0, m_rp.space().impl_internal_space_instance(),
+          false);
+    } else if (RP::rank == 3) {
+      const dim3 block(m_rp.m_tile[0], m_rp.m_tile[1], m_rp.m_tile[2]);
+      KOKKOS_ASSERT(block.x > 0);
+      KOKKOS_ASSERT(block.y > 0);
+      KOKKOS_ASSERT(block.z > 0);
+      const dim3 grid(
+          std::min<array_index_type>(
+              (m_rp.m_upper[0] - m_rp.m_lower[0] + block.x - 1) / block.x,
+              maxblocks[0]),
+          std::min<array_index_type>(
+              (m_rp.m_upper[1] - m_rp.m_lower[1] + block.y - 1) / block.y,
+              maxblocks[1]),
+          std::min<array_index_type>(
+              (m_rp.m_upper[2] - m_rp.m_lower[2] + block.z - 1) / block.z,
+              maxblocks[2]));
+      CudaParallelLaunch<ParallelFor, LaunchBounds>(
+          *this, grid, block, 0, m_rp.space().impl_internal_space_instance(),
+          false);
+    } else if (RP::rank == 4) {
+      // id0,id1 encoded within threadIdx.x; id2 to threadIdx.y; id3 to
+      // threadIdx.z
+      const dim3 block(m_rp.m_tile[0] * m_rp.m_tile[1], m_rp.m_tile[2],
+                       m_rp.m_tile[3]);
+      KOKKOS_ASSERT(block.y > 0);
+      KOKKOS_ASSERT(block.z > 0);
+      const dim3 grid(
+          std::min<array_index_type>(m_rp.m_tile_end[0] * m_rp.m_tile_end[1],
+                                     maxblocks[0]),
+          std::min<array_index_type>(
+              (m_rp.m_upper[2] - m_rp.m_lower[2] + block.y - 1) / block.y,
+              maxblocks[1]),
+          std::min<array_index_type>(
+              (m_rp.m_upper[3] - m_rp.m_lower[3] + block.z - 1) / block.z,
+              maxblocks[2]));
+      CudaParallelLaunch<ParallelFor, LaunchBounds>(
+          *this, grid, block, 0, m_rp.space().impl_internal_space_instance(),
+          false);
+    } else if (RP::rank == 5) {
+      // id0,id1 encoded within threadIdx.x; id2,id3 to threadIdx.y; id4 to
+      // threadIdx.z
+      const dim3 block(m_rp.m_tile[0] * m_rp.m_tile[1],
+                       m_rp.m_tile[2] * m_rp.m_tile[3], m_rp.m_tile[4]);
+      KOKKOS_ASSERT(block.z > 0);
+      const dim3 grid(
+          std::min<array_index_type>(m_rp.m_tile_end[0] * m_rp.m_tile_end[1],
+                                     maxblocks[0]),
+          std::min<array_index_type>(m_rp.m_tile_end[2] * m_rp.m_tile_end[3],
+                                     maxblocks[1]),
+          std::min<array_index_type>(
+              (m_rp.m_upper[4] - m_rp.m_lower[4] + block.z - 1) / block.z,
+              maxblocks[2]));
+      CudaParallelLaunch<ParallelFor, LaunchBounds>(
+          *this, grid, block, 0, m_rp.space().impl_internal_space_instance(),
+          false);
+    } else if (RP::rank == 6) {
+      // id0,id1 encoded within threadIdx.x; id2,id3 to threadIdx.y; id4,id5 to
+      // threadIdx.z
+      const dim3 block(m_rp.m_tile[0] * m_rp.m_tile[1],
+                       m_rp.m_tile[2] * m_rp.m_tile[3],
+                       m_rp.m_tile[4] * m_rp.m_tile[5]);
+      const dim3 grid(
+          std::min<array_index_type>(m_rp.m_tile_end[0] * m_rp.m_tile_end[1],
+                                     maxblocks[0]),
+          std::min<array_index_type>(m_rp.m_tile_end[2] * m_rp.m_tile_end[3],
+                                     maxblocks[1]),
+          std::min<array_index_type>(m_rp.m_tile_end[4] * m_rp.m_tile_end[5],
+                                     maxblocks[2]));
+      CudaParallelLaunch<ParallelFor, LaunchBounds>(
+          *this, grid, block, 0, m_rp.space().impl_internal_space_instance(),
+          false);
+    } else {
+      Kokkos::abort("Kokkos::MDRange Error: Exceeded rank bounds with Cuda\n");
+    }
+
+  }  // end execute
+
+  //  inline
+  ParallelFor(const FunctorType& arg_functor, Policy arg_policy)
+      : m_functor(arg_functor), m_rp(arg_policy) {}
+};
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
+                     Kokkos::Cuda> {
+ public:
+  using Policy = Kokkos::MDRangePolicy<Traits...>;
+
+ private:
+  using array_index_type = typename Policy::array_index_type;
+  using index_type       = typename Policy::index_type;
+
+  using WorkTag      = typename Policy::work_tag;
+  using Member       = typename Policy::member_type;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                                  WorkTag, void>::type;
+
+  using Analysis =
+      Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy,
+                                    ReducerTypeFwd>;
+
+ public:
+  using pointer_type   = typename Analysis::pointer_type;
+  using value_type     = typename Analysis::value_type;
+  using reference_type = typename Analysis::reference_type;
+  using functor_type   = FunctorType;
+  using size_type      = Cuda::size_type;
+  using reducer_type   = ReducerType;
+
+  // Algorithmic constraints: blockSize is a power of two AND blockDim.y ==
+  // blockDim.z == 1
+
+  const FunctorType m_functor;
+  const Policy m_policy;  // used for workrange and nwork
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  const bool m_result_ptr_device_accessible;
+  size_type* m_scratch_space;
+  size_type* m_scratch_flags;
+  size_type* m_unified_space;
+
+  using DeviceIteratePattern = typename Kokkos::Impl::Reduce::DeviceIterateTile<
+      Policy::rank, Policy, FunctorType, typename Policy::work_tag,
+      reference_type>;
+
+  // Shall we use the shfl-based reduction or not? Only use it for statically
+  // sized types of more than 128 bits, i.e.
+  // (sizeof(value_type) > 2 * sizeof(double)) && Analysis::StaticValueSize.
+  static constexpr bool UseShflReduction = false;
+  // Some crutch to do function overloading
+
+ public:
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy& pol, const Functor&) {
+    cudaFuncAttributes attr =
+        CudaParallelLaunch<ParallelReduce,
+                           LaunchBounds>::get_cuda_func_attributes();
+    auto const& prop = pol.space().cuda_device_prop();
+    // Limits due to registers/SM
+    int const regs_per_sm        = prop.regsPerMultiprocessor;
+    int const regs_per_thread    = attr.numRegs;
+    int const max_threads_per_sm = regs_per_sm / regs_per_thread;
+    return std::min(
+        max_threads_per_sm,
+        static_cast<int>(Kokkos::Impl::CudaTraits::MaxHierarchicalParallelism));
+  }
+  Policy const& get_policy() const { return m_policy; }
+  inline __device__ void exec_range(reference_type update) const {
+    Kokkos::Impl::Reduce::DeviceIterateTile<Policy::rank, Policy, FunctorType,
+                                            typename Policy::work_tag,
+                                            reference_type>(m_policy, m_functor,
+                                                            update)
+        .exec_range();
+  }
+
+  inline __device__ void operator()() const {
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+    const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
+                                                   sizeof(size_type)>
+        word_count(Analysis::value_size(
+                       ReducerConditional::select(m_functor, m_reducer)) /
+                   sizeof(size_type));
+
+    {
+      reference_type value = final_reducer.init(reinterpret_cast<pointer_type>(
+          kokkos_impl_cuda_shared_memory<size_type>() +
+          threadIdx.y * word_count.value));
+
+      // Number of blocks is bounded so that the reduction can be limited to two
+      // passes. Each thread block is given an approximately equal amount of
+      // work to perform. Accumulate the values for this block. The accumulation
+      // ordering does not match the final pass, but is arithmetically
+      // equivalent.
+
+      this->exec_range(value);
+    }
+
+    // Reduce with final value at blockDim.y - 1 location.
+    // Problem: non power-of-two blockDim
+    if (cuda_single_inter_block_reduce_scan<false>(
+            final_reducer, blockIdx.x, gridDim.x,
+            kokkos_impl_cuda_shared_memory<size_type>(), m_scratch_space,
+            m_scratch_flags)) {
+      // This is the final block with the final result at the final threads'
+      // location
+      size_type* const shared = kokkos_impl_cuda_shared_memory<size_type>() +
+                                (blockDim.y - 1) * word_count.value;
+      size_type* const global =
+          m_result_ptr_device_accessible
+              ? reinterpret_cast<size_type*>(m_result_ptr)
+              : (m_unified_space ? m_unified_space : m_scratch_space);
+
+      if (threadIdx.y == 0) {
+        final_reducer.final(reinterpret_cast<value_type*>(shared));
+      }
+
+      if (CudaTraits::WarpSize < word_count.value) {
+        __syncthreads();
+      }
+
+      for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
+        global[i] = shared[i];
+      }
+    }
+  }
+
+  // Determine block size constrained by shared memory:
+  inline unsigned local_block_size(const FunctorType& f) {
+    unsigned n = CudaTraits::WarpSize * 8;
+    int shmem_size =
+        cuda_single_inter_block_reduce_scan_shmem<false, FunctorType, WorkTag>(
+            f, n);
+    using closure_type = Impl::ParallelReduce<FunctorType, Policy, ReducerType>;
+    cudaFuncAttributes attr =
+        CudaParallelLaunch<closure_type,
+                           LaunchBounds>::get_cuda_func_attributes();
+    while (
+        (n &&
+         (m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
+          shmem_size)) ||
+        (n >
+         static_cast<unsigned>(
+             Kokkos::Impl::cuda_get_max_block_size<FunctorType, LaunchBounds>(
+                 m_policy.space().impl_internal_space_instance(), attr, f, 1,
+                 shmem_size, 0)))) {
+      n >>= 1;
+      shmem_size = cuda_single_inter_block_reduce_scan_shmem<false, FunctorType,
+                                                             WorkTag>(f, n);
+    }
+    return n;
+  }
+
+  inline void execute() {
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    const auto nwork = m_policy.m_num_tiles;
+    if (nwork) {
+      int block_size = m_policy.m_prod_tile_dims;
+      // CONSTRAINT: Algorithm requires block_size >= product of tile dimensions
+      // Nearest power of two
+      int exponent_pow_two    = std::ceil(std::log2(block_size));
+      block_size              = std::pow(2, exponent_pow_two);
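+      // e.g. (illustrative) m_prod_tile_dims == 20 gives
+      // exponent_pow_two == ceil(log2(20)) == 5 and block_size == 32,
+      // the smallest power of two >= the product of tile dimensions.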
+      int suggested_blocksize = local_block_size(m_functor);
+
+      block_size = (block_size > suggested_blocksize)
+                       ? block_size
+                       : suggested_blocksize;  // Note: block_size must be less
+                                               // than or equal to 512
+
+      m_scratch_space = cuda_internal_scratch_space(
+          m_policy.space(), Analysis::value_size(ReducerConditional::select(
+                                m_functor, m_reducer)) *
+                                block_size /* block_size == max block_count */);
+      m_scratch_flags =
+          cuda_internal_scratch_flags(m_policy.space(), sizeof(size_type));
+      m_unified_space = cuda_internal_scratch_unified(
+          m_policy.space(), Analysis::value_size(ReducerConditional::select(
+                                m_functor, m_reducer)));
+
+      // REQUIRED ( 1 , N , 1 )
+      const dim3 block(1, block_size, 1);
+      // Required grid.x <= block.y
+      const dim3 grid(std::min(int(block.y), int(nwork)), 1, 1);
+
+      // TODO @graph We need to effectively insert this in to the graph
+      const int shmem =
+          UseShflReduction
+              ? 0
+              : cuda_single_inter_block_reduce_scan_shmem<false, FunctorType,
+                                                          WorkTag>(m_functor,
+                                                                   block.y);
+
+      CudaParallelLaunch<ParallelReduce, LaunchBounds>(
+          *this, grid, block, shmem,
+          m_policy.space().impl_internal_space_instance(),
+          false);  // copy to device and execute
+
+      if (!m_result_ptr_device_accessible) {
+        if (m_result_ptr) {
+          if (m_unified_space) {
+            m_policy.space().fence(
+                "Kokkos::Impl::ParallelReduce<Cuda, MDRangePolicy>::execute: "
+                "Result Not Device Accessible");
+
+            const int count = Analysis::value_count(
+                ReducerConditional::select(m_functor, m_reducer));
+            for (int i = 0; i < count; ++i) {
+              m_result_ptr[i] = pointer_type(m_unified_space)[i];
+            }
+          } else {
+            const int size = Analysis::value_size(
+                ReducerConditional::select(m_functor, m_reducer));
+            DeepCopy<HostSpace, CudaSpace, Cuda>(m_policy.space(), m_result_ptr,
+                                                 m_scratch_space, size);
+          }
+        }
+      }
+    } else {
+      if (m_result_ptr) {
+        // TODO @graph We need to effectively insert this in to the graph
+        final_reducer.init(m_result_ptr);
+      }
+    }
+  }
+
+  template <class ViewType>
+  ParallelReduce(
+      const FunctorType& arg_functor, const Policy& arg_policy,
+      const ViewType& arg_result,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result.data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::CudaSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_scratch_space(nullptr),
+        m_scratch_flags(nullptr),
+        m_unified_space(nullptr) {
+    check_reduced_view_shmem_size<WorkTag>(m_policy, m_functor);
+  }
+
+  ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
+                 const ReducerType& reducer)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::CudaSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_scratch_space(nullptr),
+        m_scratch_flags(nullptr),
+        m_unified_space(nullptr) {
+    check_reduced_view_shmem_size<WorkTag>(m_policy, m_functor);
+  }
+};
+}  // namespace Impl
+}  // namespace Kokkos
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Parallel_Range.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Parallel_Range.hpp
new file mode 100644 (file)
index 0000000..9873343
--- /dev/null
@@ -0,0 +1,1049 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_PARALLEL_RANGE_HPP
+#define KOKKOS_CUDA_PARALLEL_RANGE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_CUDA)
+
+#include <algorithm>
+#include <string>
+
+#include <Kokkos_Parallel.hpp>
+
+#include <Cuda/Kokkos_Cuda_KernelLaunch.hpp>
+#include <Cuda/Kokkos_Cuda_ReduceScan.hpp>
+#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
+#include <Kokkos_MinMaxClamp.hpp>
+
+#include <impl/Kokkos_Tools.hpp>
+#include <typeinfo>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>, Kokkos::Cuda> {
+ public:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+ private:
+  using Member       = typename Policy::member_type;
+  using WorkTag      = typename Policy::work_tag;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  ParallelFor()        = delete;
+  ParallelFor& operator=(const ParallelFor&) = delete;
+
+  template <class TagType>
+  inline __device__ std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const Member i) const {
+    m_functor(i);
+  }
+
+  template <class TagType>
+  inline __device__ std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const Member i) const {
+    m_functor(TagType(), i);
+  }
+
+ public:
+  using functor_type = FunctorType;
+
+  Policy const& get_policy() const { return m_policy; }
+
+  inline __device__ void operator()() const {
+    const Member work_stride = blockDim.y * gridDim.x;
+    const Member work_end    = m_policy.end();
+
+    for (Member iwork =
+             m_policy.begin() + threadIdx.y + blockDim.y * blockIdx.x;
+         iwork < work_end;
+         iwork = iwork < work_end - work_stride ? iwork + work_stride
+                                                : work_end) {
+      this->template exec_range<WorkTag>(iwork);
+    }
+  }
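+
+  // Illustrative: with blockDim.y == 256 and gridDim.x == 4 the stride is
+  // 1024, so the thread with blockIdx.x == 1 and threadIdx.y == 3 visits
+  // iwork == begin + 259, begin + 1283, begin + 2307, ... up to work_end.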
+
+  inline void execute() const {
+    const typename Policy::index_type nwork = m_policy.end() - m_policy.begin();
+
+    cudaFuncAttributes attr =
+        CudaParallelLaunch<ParallelFor,
+                           LaunchBounds>::get_cuda_func_attributes();
+    const int block_size =
+        Kokkos::Impl::cuda_get_opt_block_size<FunctorType, LaunchBounds>(
+            m_policy.space().impl_internal_space_instance(), attr, m_functor, 1,
+            0, 0);
+    KOKKOS_ASSERT(block_size > 0);
+    dim3 block(1, block_size, 1);
+    dim3 grid(
+        std::min(
+            typename Policy::index_type((nwork + block.y - 1) / block.y),
+            typename Policy::index_type(cuda_internal_maximum_grid_count()[0])),
+        1, 1);
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+    if (Kokkos::Impl::CudaInternal::cuda_use_serial_execution()) {
+      block = dim3(1, 1, 1);
+      grid  = dim3(1, 1, 1);
+    }
+#endif
+
+    CudaParallelLaunch<ParallelFor, LaunchBounds>(
+        *this, grid, block, 0, m_policy.space().impl_internal_space_instance(),
+        false);
+  }
+
+  ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
+                     Kokkos::Cuda> {
+ public:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+ private:
+  using WorkRange    = typename Policy::WorkRange;
+  using WorkTag      = typename Policy::work_tag;
+  using Member       = typename Policy::member_type;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                                  WorkTag, void>::type;
+
+  using Analysis =
+      Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy,
+                                    ReducerTypeFwd>;
+
+ public:
+  using pointer_type   = typename Analysis::pointer_type;
+  using value_type     = typename Analysis::value_type;
+  using reference_type = typename Analysis::reference_type;
+  using functor_type   = FunctorType;
+  // Conditionally set word_size_type to int16_t or int8_t if value_type is
+  // smaller than int32_t (Kokkos::Cuda::size_type)
+  // word_size_type is used to determine the word count, shared memory buffer
+  // size, and global memory buffer size before the reduction is performed.
+  // Within the reduction, the word count is recomputed based on word_size_type
+  // and when calculating indexes into the shared/global memory buffers for
+  // performing the reduction, word_size_type is used again.
+  // For scalars > 4 bytes in size, indexing into shared/global memory relies
+  // on the block and grid dimensions to ensure that we index at the correct
+  // offset rather than at every 4 byte word; such that, when the join is
+  // performed, we have the correct data that was copied over in chunks of 4
+  // bytes.
+  using word_size_type = std::conditional_t<
+      sizeof(value_type) < sizeof(Kokkos::Cuda::size_type),
+      std::conditional_t<sizeof(value_type) == 2, int16_t, int8_t>,
+      Kokkos::Cuda::size_type>;
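+  // For example (illustrative): a value_type of 1 byte gives word_size_type
+  // == int8_t; 2 bytes gives int16_t; 4 bytes or more (e.g. double) gives
+  // Kokkos::Cuda::size_type.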
+  using index_type   = typename Policy::index_type;
+  using reducer_type = ReducerType;
+
+  // Algorithmic constraints: blockSize is a power of two AND blockDim.y ==
+  // blockDim.z == 1
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  const bool m_result_ptr_device_accessible;
+  const bool m_result_ptr_host_accessible;
+  word_size_type* m_scratch_space;
+  // m_scratch_flags must be of type Cuda::size_type due to use of atomics
+  // for tracking metadata in Kokkos_Cuda_ReduceScan.hpp
+  Cuda::size_type* m_scratch_flags;
+  word_size_type* m_unified_space;
+
+  // FIXME_CUDA Shall we use the shfl-based reduction or not? Only use it for
+  // statically sized types of more than 128 bits, i.e.
+  // (sizeof(value_type) > 2 * sizeof(double)) && Analysis::StaticValueSize.
+  static constexpr bool UseShflReduction = false;
+
+ public:
+  Policy const& get_policy() const { return m_policy; }
+
+  // Dispatch the exec_range calls to the functor, with or without a work tag
+  template <class TagType>
+  __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const Member& i, reference_type update) const {
+    m_functor(i, update);
+  }
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const Member& i, reference_type update) const {
+    m_functor(TagType(), i, update);
+  }
+
+  __device__ inline void operator()() const {
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    const integral_nonzero_constant<word_size_type, Analysis::StaticValueSize /
+                                                        sizeof(word_size_type)>
+        word_count(Analysis::value_size(
+                       ReducerConditional::select(m_functor, m_reducer)) /
+                   sizeof(word_size_type));
+
+    {
+      reference_type value = final_reducer.init(reinterpret_cast<pointer_type>(
+          kokkos_impl_cuda_shared_memory<word_size_type>() +
+          threadIdx.y * word_count.value));
+
+      // Number of blocks is bounded so that the reduction can be limited to two
+      // passes. Each thread block is given an approximately equal amount of
+      // work to perform. Accumulate the values for this block. The accumulation
+      // ordering does not match the final pass, but is arithmetically
+      // equivalent.
+
+      const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+      for (Member iwork = range.begin() + threadIdx.y, iwork_end = range.end();
+           iwork < iwork_end; iwork += blockDim.y) {
+        this->template exec_range<WorkTag>(iwork, value);
+      }
+    }
+
+    // Reduce with final value at blockDim.y - 1 location.
+    // Shortcut for length zero reduction
+    bool zero_length        = m_policy.begin() == m_policy.end();
+    bool do_final_reduction = true;
+    if (!zero_length)
+      do_final_reduction = cuda_single_inter_block_reduce_scan<false>(
+          final_reducer, blockIdx.x, gridDim.x,
+          kokkos_impl_cuda_shared_memory<word_size_type>(), m_scratch_space,
+          m_scratch_flags);
+
+    if (do_final_reduction) {
+      // This is the final block with the final result at the final threads'
+      // location
+
+      word_size_type* const shared =
+          kokkos_impl_cuda_shared_memory<word_size_type>() +
+          (blockDim.y - 1) * word_count.value;
+      word_size_type* const global =
+          m_result_ptr_device_accessible
+              ? reinterpret_cast<word_size_type*>(m_result_ptr)
+              : (m_unified_space ? m_unified_space : m_scratch_space);
+
+      if (threadIdx.y == 0) {
+        final_reducer.final(reinterpret_cast<value_type*>(shared));
+      }
+
+      if (CudaTraits::WarpSize < word_count.value) {
+        __syncthreads();
+      }
+
+      for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
+        global[i] = shared[i];
+      }
+    }
+  }
+
+  // Determine block size constrained by shared memory:
+  inline unsigned local_block_size(const FunctorType& f) {
+    unsigned n = CudaTraits::WarpSize * 8;
+    int shmem_size =
+        cuda_single_inter_block_reduce_scan_shmem<false, FunctorType, WorkTag>(
+            f, n);
+    using closure_type = Impl::ParallelReduce<FunctorType, Policy, ReducerType>;
+    cudaFuncAttributes attr =
+        CudaParallelLaunch<closure_type,
+                           LaunchBounds>::get_cuda_func_attributes();
+    while (
+        (n &&
+         (m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
+          shmem_size)) ||
+        (n >
+         static_cast<unsigned>(
+             Kokkos::Impl::cuda_get_max_block_size<FunctorType, LaunchBounds>(
+                 m_policy.space().impl_internal_space_instance(), attr, f, 1,
+                 shmem_size, 0)))) {
+      n >>= 1;
+      shmem_size = cuda_single_inter_block_reduce_scan_shmem<false, FunctorType,
+                                                             WorkTag>(f, n);
+    }
+    return n;
+  }
+
+  inline void execute() {
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    const index_type nwork     = m_policy.end() - m_policy.begin();
+    const bool need_device_set = Analysis::has_init_member_function ||
+                                 Analysis::has_final_member_function ||
+                                 !m_result_ptr_host_accessible ||
+#ifdef KOKKOS_CUDA_ENABLE_GRAPHS
+                                 Policy::is_graph_kernel::value ||
+#endif
+                                 !std::is_same<ReducerType, InvalidType>::value;
+    if ((nwork > 0) || need_device_set) {
+      const int block_size = local_block_size(m_functor);
+
+      KOKKOS_ASSERT(block_size > 0);
+
+      // TODO: down casting these uses more space than required?
+      m_scratch_space = (word_size_type*)cuda_internal_scratch_space(
+          m_policy.space(), Analysis::value_size(ReducerConditional::select(
+                                m_functor, m_reducer)) *
+                                block_size /* block_size == max block_count */);
+
+      // Intentionally do not downcast to word_size_type since we use Cuda
+      // atomics in Kokkos_Cuda_ReduceScan.hpp
+      m_scratch_flags = cuda_internal_scratch_flags(m_policy.space(),
+                                                    sizeof(Cuda::size_type));
+      m_unified_space =
+          reinterpret_cast<word_size_type*>(cuda_internal_scratch_unified(
+              m_policy.space(), Analysis::value_size(ReducerConditional::select(
+                                    m_functor, m_reducer))));
+
+      // REQUIRED ( 1 , N , 1 )
+      dim3 block(1, block_size, 1);
+      // Required grid.x <= block.y
+      dim3 grid(std::min(int(block.y), int((nwork + block.y - 1) / block.y)), 1,
+                1);
+
+      // TODO @graph We need to effectively insert this in to the graph
+      const int shmem =
+          UseShflReduction
+              ? 0
+              : cuda_single_inter_block_reduce_scan_shmem<false, FunctorType,
+                                                          WorkTag>(m_functor,
+                                                                   block.y);
+
+      if ((nwork == 0)
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+          || Kokkos::Impl::CudaInternal::cuda_use_serial_execution()
+#endif
+      ) {
+        block = dim3(1, 1, 1);
+        grid  = dim3(1, 1, 1);
+      }
+
+      CudaParallelLaunch<ParallelReduce, LaunchBounds>(
+          *this, grid, block, shmem,
+          m_policy.space().impl_internal_space_instance(),
+          false);  // copy to device and execute
+
+      if (!m_result_ptr_device_accessible) {
+        if (m_result_ptr) {
+          if (m_unified_space) {
+            m_policy.space().fence(
+                "Kokkos::Impl::ParallelReduce<Cuda, RangePolicy>::execute: "
+                "Result "
+                "Not Device Accessible");
+            const int count = Analysis::value_count(
+                ReducerConditional::select(m_functor, m_reducer));
+            for (int i = 0; i < count; ++i) {
+              m_result_ptr[i] = pointer_type(m_unified_space)[i];
+            }
+          } else {
+            const int size = Analysis::value_size(
+                ReducerConditional::select(m_functor, m_reducer));
+            DeepCopy<HostSpace, CudaSpace, Cuda>(m_policy.space(), m_result_ptr,
+                                                 m_scratch_space, size);
+          }
+        }
+      }
+    } else {
+      if (m_result_ptr) {
+        // TODO @graph We need to effectively insert this in to the graph
+        final_reducer.init(m_result_ptr);
+      }
+    }
+  }
+
+  template <class ViewType>
+  ParallelReduce(
+      const FunctorType& arg_functor, const Policy& arg_policy,
+      const ViewType& arg_result,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result.data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::CudaSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_result_ptr_host_accessible(
+            MemorySpaceAccess<Kokkos::HostSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_scratch_space(nullptr),
+        m_scratch_flags(nullptr),
+        m_unified_space(nullptr) {
+    check_reduced_view_shmem_size<WorkTag>(m_policy, m_functor);
+  }
+
+  ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
+                 const ReducerType& reducer)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::CudaSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_result_ptr_host_accessible(
+            MemorySpaceAccess<Kokkos::HostSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_scratch_space(nullptr),
+        m_scratch_flags(nullptr),
+        m_unified_space(nullptr) {
+    check_reduced_view_shmem_size<WorkTag>(m_policy, m_functor);
+  }
+};
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>, Kokkos::Cuda> {
+ public:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+ private:
+  using Member       = typename Policy::member_type;
+  using WorkTag      = typename Policy::work_tag;
+  using WorkRange    = typename Policy::WorkRange;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+  using Analysis = Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::SCAN,
+                                                 Policy, FunctorType>;
+
+ public:
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+  using functor_type   = FunctorType;
+  using size_type      = Cuda::size_type;
+
+ private:
+  // Algorithmic constraints:
+  //  (a) blockDim.y is a power of two
+  //  (b) blockDim.y == blockDim.z == 1
+  //  (c) gridDim.x  <= blockDim.y * blockDim.y
+  //  (d) gridDim.y  == gridDim.z == 1
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  size_type* m_scratch_space;
+  size_type* m_scratch_flags;
+  size_type m_final;
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+  bool m_run_serial;
+#endif
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const Member& i, reference_type update, const bool final_result) const {
+    m_functor(i, update, final_result);
+  }
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const Member& i, reference_type update, const bool final_result) const {
+    m_functor(TagType(), i, update, final_result);
+  }
+
+  //----------------------------------------
+
+  __device__ inline void initial() const {
+    typename Analysis::Reducer final_reducer(&m_functor);
+
+    const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
+                                                   sizeof(size_type)>
+        word_count(Analysis::value_size(m_functor) / sizeof(size_type));
+
+    size_type* const shared_value =
+        kokkos_impl_cuda_shared_memory<size_type>() +
+        word_count.value * threadIdx.y;
+
+    final_reducer.init(reinterpret_cast<pointer_type>(shared_value));
+
+    // Number of blocks is bounded so that the reduction can be limited to two
+    // passes. Each thread block is given an approximately equal amount of work
+    // to perform. Accumulate the values for this block. The accumulation
+    // ordering does not match the final pass, but is arithmetically equivalent.
+
+    const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+    for (Member iwork = range.begin() + threadIdx.y, iwork_end = range.end();
+         iwork < iwork_end; iwork += blockDim.y) {
+      this->template exec_range<WorkTag>(
+          iwork,
+          final_reducer.reference(reinterpret_cast<pointer_type>(shared_value)),
+          false);
+    }
+
+    // Reduce and scan, writing out scan of blocks' totals and block-groups'
+    // totals. Blocks' scan values are written to 'blockIdx.x' location.
+    // Block-groups' scan values are at: i = ( j * blockDim.y - 1 ) for i <
+    // gridDim.x
+    cuda_single_inter_block_reduce_scan<true>(
+        final_reducer, blockIdx.x, gridDim.x,
+        kokkos_impl_cuda_shared_memory<size_type>(), m_scratch_space,
+        m_scratch_flags);
+  }
+
+  //----------------------------------------
+
+  __device__ inline void final() const {
+    typename Analysis::Reducer final_reducer(&m_functor);
+
+    const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
+                                                   sizeof(size_type)>
+        word_count(Analysis::value_size(m_functor) / sizeof(size_type));
+
+    // Use shared memory as an exclusive scan: { 0 , value[0] , value[1] ,
+    // value[2] , ... }
+    size_type* const shared_data = kokkos_impl_cuda_shared_memory<size_type>();
+    size_type* const shared_prefix =
+        shared_data + word_count.value * threadIdx.y;
+    size_type* const shared_accum =
+        shared_data + word_count.value * (blockDim.y + 1);
+
+    // Starting value for this thread block is the previous block's total.
+    if (blockIdx.x) {
+      size_type* const block_total =
+          m_scratch_space + word_count.value * (blockIdx.x - 1);
+      for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
+        shared_accum[i] = block_total[i];
+      }
+    } else if (0 == threadIdx.y) {
+      final_reducer.init(reinterpret_cast<pointer_type>(shared_accum));
+    }
+
+    const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+    for (typename Policy::member_type iwork_base = range.begin();
+         iwork_base < range.end(); iwork_base += blockDim.y) {
+      unsigned MASK                            = __activemask();
+      const typename Policy::member_type iwork = iwork_base + threadIdx.y;
+
+      __syncthreads();  // Don't overwrite previous iteration values until they
+                        // are used
+
+      final_reducer.init(
+          reinterpret_cast<pointer_type>(shared_prefix + word_count.value));
+
+      // Copy previous block's accumulation total into thread[0] prefix and
+      // inclusive scan value of this block
+      for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
+        shared_data[i + word_count.value] = shared_data[i] = shared_accum[i];
+      }
+      __syncwarp(MASK);
+      if (CudaTraits::WarpSize < word_count.value) {
+        __syncthreads();
+      }  // Protect against large scan values.
+
+      // Call functor to accumulate inclusive scan value for this work item
+      if (iwork < range.end()) {
+        this->template exec_range<WorkTag>(
+            iwork,
+            final_reducer.reference(reinterpret_cast<pointer_type>(
+                shared_prefix + word_count.value)),
+            false);
+      }
+
+      // Scan block values into locations shared_data[1..blockDim.y]
+      cuda_intra_block_reduce_scan<true>(
+          final_reducer,
+          typename Analysis::pointer_type(shared_data + word_count.value));
+
+      {
+        size_type* const block_total =
+            shared_data + word_count.value * blockDim.y;
+        for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
+          shared_accum[i] = block_total[i];
+        }
+      }
+
+      // Call functor with exclusive scan value
+      if (iwork < range.end()) {
+        this->template exec_range<WorkTag>(
+            iwork,
+            final_reducer.reference(
+                reinterpret_cast<pointer_type>(shared_prefix)),
+            true);
+      }
+    }
+  }
+
+ public:
+  Policy const& get_policy() const { return m_policy; }
+
+  //----------------------------------------
+
+  __device__ inline void operator()() const {
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+    if (m_run_serial) {
+      typename Analysis::value_type value;
+      ValueInit::init(m_functor, (void*)&value);
+      const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+      for (typename Policy::member_type iwork_base = range.begin();
+           iwork_base < range.end(); iwork_base++) {
+        this->template exec_range<WorkTag>(iwork_base, value, true);
+      }
+    } else {
+#endif
+      if (!m_final) {
+        initial();
+      } else {
+        final();
+      }
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+    }
+#endif
+  }
+
+  // Determine block size constrained by shared memory:
+  inline unsigned local_block_size(const FunctorType& f) {
+    // blockDim.y must be a power of two: 128 (4 warps), 256 (8 warps), or 512
+    // (16 warps); gridDim.x <= blockDim.y * blockDim.y
+    //
+    // 4 warps was 10% faster than 8 warps and 20% faster than 16 warps in unit
+    // testing
+
+    unsigned n = CudaTraits::WarpSize * 4;
+    while (n && unsigned(m_policy.space()
+                             .impl_internal_space_instance()
+                             ->m_maxShmemPerBlock) <
+                    cuda_single_inter_block_reduce_scan_shmem<true, FunctorType,
+                                                              WorkTag>(f, n)) {
+      n >>= 1;
+    }
+    return n;
+  }
+
+  inline void execute() {
+    const auto nwork = m_policy.end() - m_policy.begin();
+    if (nwork) {
+      constexpr int GridMaxComputeCapability_2x = 0x0ffff;
+
+      const int block_size = local_block_size(m_functor);
+      KOKKOS_ASSERT(block_size > 0);
+
+      const int grid_max =
+          (block_size * block_size) < GridMaxComputeCapability_2x
+              ? (block_size * block_size)
+              : GridMaxComputeCapability_2x;
+
+      // At most 'max_grid' blocks:
+      const int max_grid =
+          std::min(int(grid_max), int((nwork + block_size - 1) / block_size));
+
+      // How much work per block:
+      const int work_per_block = (nwork + max_grid - 1) / max_grid;
+
+      // How many blocks are really needed for this much work:
+      const int grid_x = (nwork + work_per_block - 1) / work_per_block;
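+      // Worked example (hypothetical sizes, not from the source): for
+      // nwork = 1000000 and block_size = 128, grid_max = 16384,
+      // max_grid = min(16384, 7813) = 7813, work_per_block = 128, and
+      // grid_x = 7813, so every block receives 128 work items.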
+
+      m_scratch_space = cuda_internal_scratch_space(
+          m_policy.space(), Analysis::value_size(m_functor) * grid_x);
+      m_scratch_flags =
+          cuda_internal_scratch_flags(m_policy.space(), sizeof(size_type) * 1);
+
+      dim3 grid(grid_x, 1, 1);
+      dim3 block(1, block_size, 1);  // REQUIRED DIMENSIONS ( 1 , N , 1 )
+      const int shmem = Analysis::value_size(m_functor) * (block_size + 2);
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+      if (m_run_serial) {
+        block = dim3(1, 1, 1);
+        grid  = dim3(1, 1, 1);
+      } else {
+#endif
+        m_final = false;
+        CudaParallelLaunch<ParallelScan, LaunchBounds>(
+            *this, grid, block, shmem,
+            m_policy.space().impl_internal_space_instance(),
+            false);  // copy to device and execute
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+      }
+#endif
+      m_final = true;
+      CudaParallelLaunch<ParallelScan, LaunchBounds>(
+          *this, grid, block, shmem,
+          m_policy.space().impl_internal_space_instance(),
+          false);  // copy to device and execute
+    }
+  }
+
+  ParallelScan(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_scratch_space(nullptr),
+        m_scratch_flags(nullptr),
+        m_final(false)
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+        ,
+        m_run_serial(Kokkos::Impl::CudaInternal::cuda_use_serial_execution())
+#endif
+  {
+  }
+};
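+
+// Usage that reaches this specialization (a minimal sketch; `data` and
+// `prefix` are hypothetical device views):
+//
+//   Kokkos::parallel_scan(
+//       Kokkos::RangePolicy<Kokkos::Cuda>(0, n),
+//       KOKKOS_LAMBDA(const int64_t i, int64_t& update, const bool final) {
+//         if (final) prefix(i) = update;  // exclusive prefix on final pass
+//         update += data(i);
+//       });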
+
+//----------------------------------------------------------------------------
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+                            ReturnType, Kokkos::Cuda> {
+ public:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+ private:
+  using Member       = typename Policy::member_type;
+  using WorkTag      = typename Policy::work_tag;
+  using WorkRange    = typename Policy::WorkRange;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+  using Analysis = Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::SCAN,
+                                                 Policy, FunctorType>;
+
+ public:
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+  using functor_type   = FunctorType;
+  using size_type      = Cuda::size_type;
+
+ private:
+  // Algorithmic constraints:
+  //  (a) blockDim.y is a power of two
+  //  (b) blockDim.y == blockDim.z == 1
+  //  (c) gridDim.x  <= blockDim.y * blockDim.y
+  //  (d) gridDim.y  == gridDim.z == 1
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  size_type* m_scratch_space;
+  size_type* m_scratch_flags;
+  size_type m_final;
+  ReturnType& m_returnvalue;
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+  bool m_run_serial;
+#endif
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const Member& i, reference_type update, const bool final_result) const {
+    m_functor(i, update, final_result);
+  }
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const Member& i, reference_type update, const bool final_result) const {
+    m_functor(TagType(), i, update, final_result);
+  }
+
+  //----------------------------------------
+
+  __device__ inline void initial() const {
+    typename Analysis::Reducer final_reducer(&m_functor);
+
+    const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
+                                                   sizeof(size_type)>
+        word_count(Analysis::value_size(m_functor) / sizeof(size_type));
+
+    size_type* const shared_value =
+        kokkos_impl_cuda_shared_memory<size_type>() +
+        word_count.value * threadIdx.y;
+
+    final_reducer.init(reinterpret_cast<pointer_type>(shared_value));
+
+    // Number of blocks is bounded so that the reduction can be limited to two
+    // passes. Each thread block is given an approximately equal amount of work
+    // to perform. Accumulate the values for this block. The accumulation
+    // ordering does not match the final pass, but is arithmetically equivalent.
+
+    const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+    for (Member iwork = range.begin() + threadIdx.y, iwork_end = range.end();
+         iwork < iwork_end; iwork += blockDim.y) {
+      this->template exec_range<WorkTag>(
+          iwork,
+          final_reducer.reference(reinterpret_cast<pointer_type>(shared_value)),
+          false);
+    }
+
+    // Reduce and scan, writing out scan of blocks' totals and block-groups'
+    // totals. Blocks' scan values are written to 'blockIdx.x' location.
+    // Block-groups' scan values are at: i = ( j * blockDim.y - 1 ) for i <
+    // gridDim.x
+    cuda_single_inter_block_reduce_scan<true>(
+        final_reducer, blockIdx.x, gridDim.x,
+        kokkos_impl_cuda_shared_memory<size_type>(), m_scratch_space,
+        m_scratch_flags);
+  }
+
+  //----------------------------------------
+
+  __device__ inline void final() const {
+    typename Analysis::Reducer final_reducer(&m_functor);
+
+    const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
+                                                   sizeof(size_type)>
+        word_count(Analysis::value_size(m_functor) / sizeof(size_type));
+
+    // Use shared memory as an exclusive scan: { 0 , value[0] , value[1] ,
+    // value[2] , ... }
+    size_type* const shared_data = kokkos_impl_cuda_shared_memory<size_type>();
+    size_type* const shared_prefix =
+        shared_data + word_count.value * threadIdx.y;
+    size_type* const shared_accum =
+        shared_data + word_count.value * (blockDim.y + 1);
+
+    // Starting value for this thread block is the previous block's total.
+    if (blockIdx.x) {
+      size_type* const block_total =
+          m_scratch_space + word_count.value * (blockIdx.x - 1);
+      for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
+        shared_accum[i] = block_total[i];
+      }
+    } else if (0 == threadIdx.y) {
+      final_reducer.init(reinterpret_cast<pointer_type>(shared_accum));
+    }
+
+    const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+    for (typename Policy::member_type iwork_base = range.begin();
+         iwork_base < range.end(); iwork_base += blockDim.y) {
+      unsigned MASK = __activemask();
+
+      const typename Policy::member_type iwork = iwork_base + threadIdx.y;
+
+      __syncthreads();  // Don't overwrite previous iteration values until they
+                        // are used
+
+      final_reducer.init(
+          reinterpret_cast<pointer_type>(shared_prefix + word_count.value));
+
+      // Copy previous block's accumulation total into thread[0] prefix and
+      // inclusive scan value of this block
+      for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
+        shared_data[i + word_count.value] = shared_data[i] = shared_accum[i];
+      }
+
+      __syncwarp(MASK);
+      if (CudaTraits::WarpSize < word_count.value) {
+        __syncthreads();
+      }  // Protect against large scan values.
+
+      // Call functor to accumulate inclusive scan value for this work item
+      if (iwork < range.end()) {
+        this->template exec_range<WorkTag>(
+            iwork,
+            final_reducer.reference(reinterpret_cast<pointer_type>(
+                shared_prefix + word_count.value)),
+            false);
+      }
+
+      // Scan block values into locations shared_data[1..blockDim.y]
+      cuda_intra_block_reduce_scan<true>(
+          final_reducer,
+          typename Analysis::pointer_type(shared_data + word_count.value));
+
+      {
+        size_type* const block_total =
+            shared_data + word_count.value * blockDim.y;
+        for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
+          shared_accum[i] = block_total[i];
+        }
+      }
+
+      // Call functor with exclusive scan value
+      if (iwork < range.end()) {
+        this->template exec_range<WorkTag>(
+            iwork,
+            final_reducer.reference(
+                reinterpret_cast<pointer_type>(shared_prefix)),
+            true);
+      }
+    }
+  }
+
+ public:
+  Policy const& get_policy() const { return m_policy; }
+
+  //----------------------------------------
+
+  __device__ inline void operator()() const {
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+    if (m_run_serial) {
+      typename Analysis::value_type value;
+      ValueInit::init(m_functor, (void*)&value);
+      const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+      for (typename Policy::member_type iwork_base = range.begin();
+           iwork_base < range.end(); iwork_base++) {
+        this->template exec_range<WorkTag>(iwork_base, value, true);
+      }
+      *((typename Analysis::value_type*)m_scratch_space) = value;
+    } else {
+#endif
+      if (!m_final) {
+        initial();
+      } else {
+        final();
+      }
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+    }
+#endif
+  }
+
+  // Determine block size constrained by shared memory:
+  inline unsigned local_block_size(const FunctorType& f) {
+    // blockDim.y must be a power of two: 128 (4 warps), 256 (8 warps), or 512
+    // (16 warps); gridDim.x <= blockDim.y * blockDim.y
+    //
+    // 4 warps was 10% faster than 8 warps and 20% faster than 16 warps in unit
+    // testing
+
+    unsigned n = CudaTraits::WarpSize * 4;
+    while (n && unsigned(m_policy.space()
+                             .impl_internal_space_instance()
+                             ->m_maxShmemPerBlock) <
+                    cuda_single_inter_block_reduce_scan_shmem<true, FunctorType,
+                                                              WorkTag>(f, n)) {
+      n >>= 1;
+    }
+    return n;
+  }
+
+  inline void execute() {
+    const auto nwork = m_policy.end() - m_policy.begin();
+    if (nwork) {
+      constexpr int GridMaxComputeCapability_2x = 0x0ffff;
+
+      const int block_size = local_block_size(m_functor);
+      KOKKOS_ASSERT(block_size > 0);
+
+      const int grid_max =
+          (block_size * block_size) < GridMaxComputeCapability_2x
+              ? (block_size * block_size)
+              : GridMaxComputeCapability_2x;
+
+      // At most 'max_grid' blocks:
+      const int max_grid =
+          std::min(int(grid_max), int((nwork + block_size - 1) / block_size));
+
+      // How much work per block:
+      const int work_per_block = (nwork + max_grid - 1) / max_grid;
+
+      // How many blocks are really needed for this much work:
+      const int grid_x = (nwork + work_per_block - 1) / work_per_block;
+
+      m_scratch_space = cuda_internal_scratch_space(
+          m_policy.space(), Analysis::value_size(m_functor) * grid_x);
+      m_scratch_flags =
+          cuda_internal_scratch_flags(m_policy.space(), sizeof(size_type) * 1);
+
+      dim3 grid(grid_x, 1, 1);
+      dim3 block(1, block_size, 1);  // REQUIRED DIMENSIONS ( 1 , N , 1 )
+      const int shmem = Analysis::value_size(m_functor) * (block_size + 2);
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+      if (m_run_serial) {
+        block = dim3(1, 1, 1);
+        grid  = dim3(1, 1, 1);
+      } else {
+#endif
+
+        m_final = false;
+        CudaParallelLaunch<ParallelScanWithTotal, LaunchBounds>(
+            *this, grid, block, shmem,
+            m_policy.space().impl_internal_space_instance(),
+            false);  // copy to device and execute
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+      }
+#endif
+      m_final = true;
+      CudaParallelLaunch<ParallelScanWithTotal, LaunchBounds>(
+          *this, grid, block, shmem,
+          m_policy.space().impl_internal_space_instance(),
+          false);  // copy to device and execute
+
+      const int size = Analysis::value_size(m_functor);
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+      if (m_run_serial)
+        DeepCopy<HostSpace, CudaSpace, Cuda>(m_policy.space(), &m_returnvalue,
+                                             m_scratch_space, size);
+      else
+#endif
+        DeepCopy<HostSpace, CudaSpace, Cuda>(
+            m_policy.space(), &m_returnvalue,
+            m_scratch_space + (grid_x - 1) * size / sizeof(int), size);
+    }
+  }
+
+  ParallelScanWithTotal(const FunctorType& arg_functor,
+                        const Policy& arg_policy, ReturnType& arg_returnvalue)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_scratch_space(nullptr),
+        m_scratch_flags(nullptr),
+        m_final(false),
+        m_returnvalue(arg_returnvalue)
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+        ,
+        m_run_serial(Kokkos::Impl::CudaInternal::cuda_use_serial_execution())
+#endif
+  {
+  }
+};
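+
+// The *WithTotal variant additionally deep-copies the final scan value back to
+// the caller; it backs the return-value overload of Kokkos::parallel_scan
+// (a sketch; `scan_functor` and `total` are hypothetical):
+//
+//   int64_t total = 0;
+//   Kokkos::parallel_scan(Kokkos::RangePolicy<Kokkos::Cuda>(0, n),
+//                         scan_functor, total);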
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Parallel_Team.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Parallel_Team.hpp
new file mode 100644 (file)
index 0000000..cdd1608
--- /dev/null
@@ -0,0 +1,1139 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_PARALLEL_TEAM_HPP
+#define KOKKOS_CUDA_PARALLEL_TEAM_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_CUDA)
+
+#include <algorithm>
+#include <string>
+#include <cstdio>
+#include <cstdint>
+
+#include <utility>
+#include <Kokkos_Parallel.hpp>
+
+#include <Cuda/Kokkos_Cuda_KernelLaunch.hpp>
+#include <Cuda/Kokkos_Cuda_ReduceScan.hpp>
+#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
+#include <Cuda/Kokkos_Cuda_Locks.hpp>
+#include <Cuda/Kokkos_Cuda_Team.hpp>
+#include <Kokkos_MinMaxClamp.hpp>
+#include <Kokkos_Vectorization.hpp>
+
+#include <impl/Kokkos_Tools.hpp>
+#include <typeinfo>
+
+#include <impl/KokkosExp_IterateTileGPU.hpp>
+
+namespace Kokkos {
+
+extern bool show_warnings() noexcept;
+
+namespace Impl {
+
+template <class... Properties>
+class TeamPolicyInternal<Kokkos::Cuda, Properties...>
+    : public PolicyTraits<Properties...> {
+ public:
+  //! Tag this class as a kokkos execution policy
+  using execution_policy = TeamPolicyInternal;
+
+  using traits = PolicyTraits<Properties...>;
+
+  template <class ExecSpace, class... OtherProperties>
+  friend class TeamPolicyInternal;
+
+ private:
+  static constexpr int MAX_WARP = 8;
+
+  typename traits::execution_space m_space;
+  int m_league_size;
+  int m_team_size;
+  int m_vector_length;
+  size_t m_team_scratch_size[2];
+  size_t m_thread_scratch_size[2];
+  int m_chunk_size;
+  bool m_tune_team;
+  bool m_tune_vector;
+
+ public:
+  //! Execution space of this execution policy
+  using execution_space = Kokkos::Cuda;
+
+  template <class... OtherProperties>
+  TeamPolicyInternal(const TeamPolicyInternal<OtherProperties...>& p) {
+    m_league_size            = p.m_league_size;
+    m_team_size              = p.m_team_size;
+    m_vector_length          = p.m_vector_length;
+    m_team_scratch_size[0]   = p.m_team_scratch_size[0];
+    m_team_scratch_size[1]   = p.m_team_scratch_size[1];
+    m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
+    m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
+    m_chunk_size             = p.m_chunk_size;
+    m_space                  = p.m_space;
+    m_tune_team              = p.m_tune_team;
+    m_tune_vector            = p.m_tune_vector;
+  }
+
+  //----------------------------------------
+
+  template <class FunctorType>
+  int team_size_max(const FunctorType& f, const ParallelForTag&) const {
+    using closure_type =
+        Impl::ParallelFor<FunctorType, TeamPolicy<Properties...>>;
+    cudaFuncAttributes attr =
+        CudaParallelLaunch<closure_type, typename traits::launch_bounds>::
+            get_cuda_func_attributes();
+    int block_size =
+        Kokkos::Impl::cuda_get_max_block_size<FunctorType,
+                                              typename traits::launch_bounds>(
+            space().impl_internal_space_instance(), attr, f,
+            (size_t)impl_vector_length(),
+            (size_t)team_scratch_size(0) + 2 * sizeof(double),
+            (size_t)thread_scratch_size(0) + sizeof(double));
+    return block_size / impl_vector_length();
+  }
+
+  template <class FunctorType>
+  inline int team_size_max(const FunctorType& f,
+                           const ParallelReduceTag&) const {
+    using functor_analysis_type =
+        Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                              TeamPolicyInternal, FunctorType>;
+    using reducer_type = typename Impl::ParallelReduceReturnValue<
+        void, typename functor_analysis_type::value_type,
+        FunctorType>::reducer_type;
+    using closure_type =
+        Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
+                             reducer_type>;
+    return internal_team_size_max<closure_type>(f);
+  }
+
+  template <class FunctorType, class ReducerType>
+  inline int team_size_max(const FunctorType& f, const ReducerType& /*r*/,
+                           const ParallelReduceTag&) const {
+    using closure_type =
+        Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
+                             ReducerType>;
+    return internal_team_size_max<closure_type>(f);
+  }
+
+  template <class FunctorType>
+  int team_size_recommended(const FunctorType& f, const ParallelForTag&) const {
+    using closure_type =
+        Impl::ParallelFor<FunctorType, TeamPolicy<Properties...>>;
+    cudaFuncAttributes attr =
+        CudaParallelLaunch<closure_type, typename traits::launch_bounds>::
+            get_cuda_func_attributes();
+    const int block_size =
+        Kokkos::Impl::cuda_get_opt_block_size<FunctorType,
+                                              typename traits::launch_bounds>(
+            space().impl_internal_space_instance(), attr, f,
+            (size_t)impl_vector_length(),
+            (size_t)team_scratch_size(0) + 2 * sizeof(double),
+            (size_t)thread_scratch_size(0) + sizeof(double));
+    return block_size / impl_vector_length();
+  }
+
+  template <class FunctorType>
+  inline int team_size_recommended(const FunctorType& f,
+                                   const ParallelReduceTag&) const {
+    using functor_analysis_type =
+        Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                              TeamPolicyInternal, FunctorType>;
+    using reducer_type = typename Impl::ParallelReduceReturnValue<
+        void, typename functor_analysis_type::value_type,
+        FunctorType>::reducer_type;
+    using closure_type =
+        Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
+                             reducer_type>;
+    return internal_team_size_recommended<closure_type>(f);
+  }
+
+  template <class FunctorType, class ReducerType>
+  int team_size_recommended(const FunctorType& f, const ReducerType&,
+                            const ParallelReduceTag&) const {
+    using closure_type =
+        Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
+                             ReducerType>;
+    return internal_team_size_recommended<closure_type>(f);
+  }
+
+  inline static int vector_length_max() { return Impl::CudaTraits::WarpSize; }
+
+  inline static int verify_requested_vector_length(
+      int requested_vector_length) {
+    int test_vector_length =
+        std::min(requested_vector_length, vector_length_max());
+
+    // Allow only power-of-two vector_length
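+    // (for example, a request of 6 is rounded down to 4; a request of 64 is
+    // already clamped to the warp size, 32, above)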
+    if (!(is_integral_power_of_two(test_vector_length))) {
+      int test_pow2 = 1;
+      for (int i = 0; i < 5; i++) {
+        test_pow2 = test_pow2 << 1;
+        if (test_pow2 > test_vector_length) {
+          break;
+        }
+      }
+      test_vector_length = test_pow2 >> 1;
+    }
+
+    return test_vector_length;
+  }
+
+  inline static int scratch_size_max(int level) {
+    return (
+        level == 0 ? 1024 * 40 :  // 48kB is the max for CUDA, but we need some
+                                  // for team_member.reduce etc.
+            20 * 1024 *
+                1024);  // arbitrarily setting this to 20MB, for a Volta V100
+                        // that would give us about 3.2GB for 2 teams per SM
+  }
+
+  //----------------------------------------
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  KOKKOS_DEPRECATED inline int vector_length() const {
+    return impl_vector_length();
+  }
+#endif
+  inline int impl_vector_length() const { return m_vector_length; }
+  inline int team_size() const { return m_team_size; }
+  inline int league_size() const { return m_league_size; }
+  inline bool impl_auto_team_size() const { return m_tune_team; }
+  inline bool impl_auto_vector_length() const { return m_tune_vector; }
+  inline void impl_set_team_size(size_t team_size) { m_team_size = team_size; }
+  inline void impl_set_vector_length(size_t vector_length) {
+    m_vector_length = vector_length;
+  }
+  size_t scratch_size(int level, int team_size_ = -1) const {
+    if (team_size_ < 0) team_size_ = m_team_size;
+    return m_team_scratch_size[level] +
+           team_size_ * m_thread_scratch_size[level];
+  }
+  size_t team_scratch_size(int level) const {
+    return m_team_scratch_size[level];
+  }
+  size_t thread_scratch_size(int level) const {
+    return m_thread_scratch_size[level];
+  }
+
+  const typename traits::execution_space& space() const { return m_space; }
+
+  TeamPolicyInternal()
+      : m_space(typename traits::execution_space()),
+        m_league_size(0),
+        m_team_size(-1),
+        m_vector_length(0),
+        m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(Impl::CudaTraits::WarpSize),
+        m_tune_team(false),
+        m_tune_vector(false) {}
+
+  /** \brief  Specify league size, specify team size, specify vector length */
+  TeamPolicyInternal(const execution_space space_, int league_size_,
+                     int team_size_request, int vector_length_request = 1)
+      : m_space(space_),
+        m_league_size(league_size_),
+        m_team_size(team_size_request),
+        m_vector_length(
+            (vector_length_request > 0)
+                ? verify_requested_vector_length(vector_length_request)
+                : verify_requested_vector_length(1)),
+        m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(Impl::CudaTraits::WarpSize),
+        m_tune_team(bool(team_size_request <= 0)),
+        m_tune_vector(bool(vector_length_request <= 0)) {
+    // Make sure league size is permissible
+    if (league_size_ >= int(Impl::cuda_internal_maximum_grid_count()[0]))
+      Impl::throw_runtime_exception(
+          "Requested too large league_size for TeamPolicy on Cuda execution "
+          "space.");
+
+    // Make sure total block size is permissible
+    if (m_team_size * m_vector_length >
+        int(Impl::CudaTraits::MaxHierarchicalParallelism)) {
+      Impl::throw_runtime_exception(
+          std::string("Kokkos::TeamPolicy< Cuda > the team size is too large. "
+                      "Team size x vector length must be smaller than 1024."));
+    }
+  }
+
+  /** \brief  Specify league size, request team size, specify vector length */
+  TeamPolicyInternal(const execution_space space_, int league_size_,
+                     const Kokkos::AUTO_t& /* team_size_request */
+                     ,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(space_, league_size_, -1, vector_length_request) {}
+
+  /** \brief  Specify league size, request team size and vector length */
+  TeamPolicyInternal(const execution_space space_, int league_size_,
+                     const Kokkos::AUTO_t& /* team_size_request */,
+                     const Kokkos::AUTO_t& /* vector_length_request */
+                     )
+      : TeamPolicyInternal(space_, league_size_, -1, -1) {}
+
+  /** \brief  Specify league size, specify team size, request vector length */
+  TeamPolicyInternal(const execution_space space_, int league_size_,
+                     int team_size_request, const Kokkos::AUTO_t&)
+      : TeamPolicyInternal(space_, league_size_, team_size_request, -1) {}
+
+  TeamPolicyInternal(int league_size_, int team_size_request,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+                           team_size_request, vector_length_request) {}
+
+  TeamPolicyInternal(int league_size_, const Kokkos::AUTO_t& team_size_request,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+                           team_size_request, vector_length_request)
+
+  {}
+
+  /** \brief  Specify league size, request team size */
+  TeamPolicyInternal(int league_size_, const Kokkos::AUTO_t& team_size_request,
+                     const Kokkos::AUTO_t& vector_length_request)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+                           team_size_request, vector_length_request) {}
+
+  /** \brief  Specify league size, request team size */
+  TeamPolicyInternal(int league_size_, int team_size_request,
+                     const Kokkos::AUTO_t& vector_length_request)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+                           team_size_request, vector_length_request) {}
+
+  inline int chunk_size() const { return m_chunk_size; }
+
+  /** \brief set chunk_size to a discrete value*/
+  inline TeamPolicyInternal& set_chunk_size(
+      typename traits::index_type chunk_size_) {
+    m_chunk_size = chunk_size_;
+    return *this;
+  }
+
+  /** \brief set per team scratch size for a specific level of the scratch
+   * hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(const int& level,
+                                              const PerTeamValue& per_team) {
+    m_team_scratch_size[level] = per_team.value;
+    return *this;
+  }
+
+  /** \brief set per thread scratch size for a specific level of the scratch
+   * hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(
+      const int& level, const PerThreadValue& per_thread) {
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+  /** \brief set per thread and per team scratch size for a specific level of
+   * the scratch hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(
+      const int& level, const PerTeamValue& per_team,
+      const PerThreadValue& per_thread) {
+    m_team_scratch_size[level]   = per_team.value;
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+  using member_type = Kokkos::Impl::CudaTeamMember;
+
+ protected:
+  template <class ClosureType, class FunctorType, class BlockSizeCallable>
+  int internal_team_size_common(const FunctorType& f,
+                                BlockSizeCallable&& block_size_callable) const {
+    using closure_type = ClosureType;
+    using Interface =
+        typename Impl::DeduceFunctorPatternInterface<ClosureType>::type;
+    using Analysis =
+        Impl::FunctorAnalysis<Interface, typename ClosureType::Policy,
+                              FunctorType>;
+
+    cudaFuncAttributes attr =
+        CudaParallelLaunch<closure_type, typename traits::launch_bounds>::
+            get_cuda_func_attributes();
+    const int block_size = std::forward<BlockSizeCallable>(block_size_callable)(
+        space().impl_internal_space_instance(), attr, f,
+        (size_t)impl_vector_length(),
+        (size_t)team_scratch_size(0) + 2 * sizeof(double),
+        (size_t)thread_scratch_size(0) + sizeof(double) +
+            ((Analysis::StaticValueSize != 0) ? 0 : Analysis::value_size(f)));
+    KOKKOS_ASSERT(block_size > 0);
+
+    // Currently we require Power-of-2 team size for reductions.
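+    // For example, a deduced block_size of 160 with vector length 2 yields
+    // p2 = 128 and a returned team size of 64.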
+    int p2 = 1;
+    while (p2 <= block_size) p2 *= 2;
+    p2 /= 2;
+    return p2 / impl_vector_length();
+  }
+
+  template <class ClosureType, class FunctorType>
+  int internal_team_size_max(const FunctorType& f) const {
+    return internal_team_size_common<ClosureType>(
+        f,
+        Kokkos::Impl::cuda_get_max_block_size<FunctorType,
+                                              typename traits::launch_bounds>);
+  }
+
+  template <class ClosureType, class FunctorType>
+  int internal_team_size_recommended(const FunctorType& f) const {
+    return internal_team_size_common<ClosureType>(
+        f,
+        Kokkos::Impl::cuda_get_opt_block_size<FunctorType,
+                                              typename traits::launch_bounds>);
+  }
+};
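+
+// Construction sketch for the policy above (hypothetical sizes): a league of
+// 1024 teams, team size deduced at dispatch, vector length 4, plus per-team
+// and per-thread level-0 scratch:
+//
+//   Kokkos::TeamPolicy<Kokkos::Cuda> policy(1024, Kokkos::AUTO, 4);
+//   policy.set_scratch_size(0, Kokkos::PerTeam(512), Kokkos::PerThread(64));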
+
+__device__ inline int64_t cuda_get_scratch_index(Cuda::size_type league_size,
+                                                 int32_t* scratch_locks) {
+  int64_t threadid = 0;
+  __shared__ int64_t base_thread_id;
+  if (threadIdx.x == 0 && threadIdx.y == 0) {
+    int64_t const wraparound_len = Kokkos::max(
+        int64_t(1), Kokkos::min(int64_t(league_size),
+                                (int64_t(g_device_cuda_lock_arrays.n)) /
+                                    (blockDim.x * blockDim.y)));
+    threadid = (blockIdx.x * blockDim.z + threadIdx.z) % wraparound_len;
+    threadid *= blockDim.x * blockDim.y;
+    int done = 0;
+    while (!done) {
+      done = (0 == atomicCAS(&scratch_locks[threadid], 0, 1));
+      if (!done) {
+        threadid += blockDim.x * blockDim.y;
+        if (int64_t(threadid + blockDim.x * blockDim.y) >=
+            wraparound_len * blockDim.x * blockDim.y)
+          threadid = 0;
+      }
+    }
+    base_thread_id = threadid;
+  }
+  __syncthreads();
+  threadid = base_thread_id;
+  return threadid;
+}
+
+__device__ inline void cuda_release_scratch_index(int32_t* scratch_locks,
+                                                  int64_t threadid) {
+  __syncthreads();
+  if (threadIdx.x == 0 && threadIdx.y == 0) {
+    scratch_locks[threadid] = 0;
+  }
+}
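+
+// The two helpers above implement a spin-lock over a fixed pool of lock slots;
+// the team kernels below use them in an acquire/use/release pattern whenever
+// level-1 scratch is requested:
+//
+//   int64_t tid = 0;
+//   if (m_scratch_size[1] > 0)
+//     tid = cuda_get_scratch_index(m_league_size, m_scratch_locks);
+//   // ... index level-1 scratch by tid / (blockDim.x * blockDim.y) ...
+//   if (m_scratch_size[1] > 0)
+//     cuda_release_scratch_index(m_scratch_locks, tid);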
+
+template <class FunctorType, class... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                  Kokkos::Cuda> {
+ public:
+  using Policy = TeamPolicy<Properties...>;
+
+ private:
+  using Member       = typename Policy::member_type;
+  using WorkTag      = typename Policy::work_tag;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+ public:
+  using functor_type = FunctorType;
+  using size_type    = Cuda::size_type;
+
+ private:
+  // Algorithmic constraints: blockDim.y is a power of two AND blockDim.z == 1.
+  // Shared memory utilization:
+  //
+  //  [ team   reduce space ]
+  //  [ team   shared space ]
+  //
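+  // The two regions are laid out back-to-back, so execute() requests
+  // m_shmem_begin + m_shmem_size bytes of dynamic shared memory in total.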
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const size_type m_league_size;
+  int m_team_size;
+  const size_type m_vector_size;
+  int m_shmem_begin;
+  int m_shmem_size;
+  void* m_scratch_ptr[2];
+  size_t m_scratch_size[2];
+  int m_scratch_pool_id = -1;
+  int32_t* m_scratch_locks;
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_team(
+      const Member& member) const {
+    m_functor(member);
+  }
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_team(
+      const Member& member) const {
+    m_functor(TagType(), member);
+  }
+
+ public:
+  Policy const& get_policy() const { return m_policy; }
+
+  __device__ inline void operator()() const {
+    // Iterate this block through the league
+    int64_t threadid = 0;
+    if (m_scratch_size[1] > 0) {
+      threadid = cuda_get_scratch_index(m_league_size, m_scratch_locks);
+    }
+
+    const int int_league_size = (int)m_league_size;
+    for (int league_rank = blockIdx.x; league_rank < int_league_size;
+         league_rank += gridDim.x) {
+      this->template exec_team<WorkTag>(typename Policy::member_type(
+          kokkos_impl_cuda_shared_memory<void>(), m_shmem_begin, m_shmem_size,
+          (void*)(((char*)m_scratch_ptr[1]) +
+                  ptrdiff_t(threadid / (blockDim.x * blockDim.y)) *
+                      m_scratch_size[1]),
+          m_scratch_size[1], league_rank, m_league_size));
+    }
+    if (m_scratch_size[1] > 0) {
+      cuda_release_scratch_index(m_scratch_locks, threadid);
+    }
+  }
+
+  inline void execute() const {
+    const int64_t shmem_size_total = m_shmem_begin + m_shmem_size;
+    dim3 grid(int(m_league_size), 1, 1);
+    const dim3 block(int(m_vector_size), int(m_team_size), 1);
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+    if (Kokkos::Impl::CudaInternal::cuda_use_serial_execution()) {
+      grid = dim3(1, 1, 1);
+    }
+#endif
+
+    CudaParallelLaunch<ParallelFor, LaunchBounds>(
+        *this, grid, block, shmem_size_total,
+        m_policy.space().impl_internal_space_instance(),
+        true);  // copy to device and execute
+  }
+
+  ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_league_size(arg_policy.league_size()),
+        m_team_size(arg_policy.team_size()),
+        m_vector_size(arg_policy.impl_vector_length()) {
+    cudaFuncAttributes attr =
+        CudaParallelLaunch<ParallelFor,
+                           LaunchBounds>::get_cuda_func_attributes();
+    m_team_size =
+        m_team_size >= 0
+            ? m_team_size
+            : Kokkos::Impl::cuda_get_opt_block_size<FunctorType, LaunchBounds>(
+                  m_policy.space().impl_internal_space_instance(), attr,
+                  m_functor, m_vector_size, m_policy.team_scratch_size(0),
+                  m_policy.thread_scratch_size(0)) /
+                  m_vector_size;
+
+    m_shmem_begin = (sizeof(double) * (m_team_size + 2));
+    m_shmem_size =
+        (m_policy.scratch_size(0, m_team_size) +
+         FunctorTeamShmemSize<FunctorType>::value(m_functor, m_team_size));
+    m_scratch_size[0] = m_policy.scratch_size(0, m_team_size);
+    m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+    m_scratch_locks =
+        m_policy.space().impl_internal_space_instance()->m_scratch_locks;
+
+    // Functor's reduce memory, team scan memory, and team shared memory depend
+    // upon team size.
+    m_scratch_ptr[0] = nullptr;
+    if (m_team_size <= 0) {
+      m_scratch_ptr[1] = nullptr;
+    } else {
+      auto scratch_ptr_id =
+          m_policy.space()
+              .impl_internal_space_instance()
+              ->resize_team_scratch_space(
+                  static_cast<std::int64_t>(m_scratch_size[1]) *
+                  (std::min(
+                      static_cast<std::int64_t>(Cuda::concurrency() /
+                                                (m_team_size * m_vector_size)),
+                      static_cast<std::int64_t>(m_league_size))));
+      m_scratch_ptr[1]  = scratch_ptr_id.first;
+      m_scratch_pool_id = scratch_ptr_id.second;
+    }
+
+    const int shmem_size_total = m_shmem_begin + m_shmem_size;
+    if (m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
+        shmem_size_total) {
+      printf(
+          "%i %i\n",
+          m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock,
+          shmem_size_total);
+      Kokkos::Impl::throw_runtime_exception(std::string(
+          "Kokkos::Impl::ParallelFor< Cuda > insufficient shared memory"));
+    }
+
+    if (int(m_team_size) >
+        int(Kokkos::Impl::cuda_get_max_block_size<FunctorType, LaunchBounds>(
+                m_policy.space().impl_internal_space_instance(), attr,
+                arg_functor, arg_policy.impl_vector_length(),
+                arg_policy.team_scratch_size(0),
+                arg_policy.thread_scratch_size(0)) /
+            arg_policy.impl_vector_length())) {
+      Kokkos::Impl::throw_runtime_exception(std::string(
+          "Kokkos::Impl::ParallelFor< Cuda > requested too large team size."));
+    }
+  }
+
+  ~ParallelFor() {
+    if (m_scratch_pool_id >= 0) {
+      m_policy.space()
+          .impl_internal_space_instance()
+          ->m_team_scratch_pool[m_scratch_pool_id] = 0;
+    }
+  }
+};
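+
+// Dispatch sketch for the specialization above (`policy` as in the earlier
+// sketch; the lambda body is hypothetical):
+//
+//   using member_type = Kokkos::TeamPolicy<Kokkos::Cuda>::member_type;
+//   Kokkos::parallel_for(policy, KOKKOS_LAMBDA(const member_type& team) {
+//     const int rank = team.league_rank();
+//     Kokkos::parallel_for(Kokkos::TeamThreadRange(team, 32),
+//                          [&](const int i) { /* per-thread work */ });
+//   });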
+
+template <class FunctorType, class ReducerType, class... Properties>
+class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                     ReducerType, Kokkos::Cuda> {
+ public:
+  using Policy = TeamPolicy<Properties...>;
+
+ private:
+  using Member       = typename Policy::member_type;
+  using WorkTag      = typename Policy::work_tag;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                                  WorkTag, void>::type;
+
+  using Analysis =
+      Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy,
+                                    ReducerTypeFwd>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+  using value_type     = typename Analysis::value_type;
+
+ public:
+  using functor_type = FunctorType;
+  using size_type    = Cuda::size_type;
+  using reducer_type = ReducerType;
+
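+  // Shuffle-based reductions require a value type whose size is known at
+  // compile time; runtime-sized reductions fall back to the shared-memory
+  // path (SHMEMReductionTag below).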
+  static constexpr bool UseShflReduction = Analysis::StaticValueSize != 0;
+
+ private:
+  struct ShflReductionTag {};
+  struct SHMEMReductionTag {};
+
+  // Algorithmic constraints: blockDim.y is a power of two AND blockDim.z == 1.
+  // Shared memory utilization:
+  //
+  //  [ global reduce space ]
+  //  [ team   reduce space ]
+  //  [ team   shared space ]
+  //
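+  // The three regions are laid out back-to-back, so execute() requests
+  // m_team_begin + m_shmem_begin + m_shmem_size bytes of dynamic shared
+  // memory in total.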
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  const bool m_result_ptr_device_accessible;
+  const bool m_result_ptr_host_accessible;
+  size_type* m_scratch_space;
+  size_type* m_scratch_flags;
+  size_type* m_unified_space;
+  size_type m_team_begin;
+  size_type m_shmem_begin;
+  size_type m_shmem_size;
+  void* m_scratch_ptr[2];
+  size_t m_scratch_size[2];
+  int m_scratch_pool_id = -1;
+  int32_t* m_scratch_locks;
+  const size_type m_league_size;
+  int m_team_size;
+  const size_type m_vector_size;
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_team(
+      const Member& member, reference_type update) const {
+    m_functor(member, update);
+  }
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_team(
+      const Member& member, reference_type update) const {
+    m_functor(TagType(), member, update);
+  }
+
+ public:
+  Policy const& get_policy() const { return m_policy; }
+
+  __device__ inline void operator()() const {
+    int64_t threadid = 0;
+    if (m_scratch_size[1] > 0) {
+      threadid = cuda_get_scratch_index(m_league_size, m_scratch_locks);
+    }
+
+    using ReductionTag = std::conditional_t<UseShflReduction, ShflReductionTag,
+                                            SHMEMReductionTag>;
+    run(ReductionTag{}, threadid);
+    if (m_scratch_size[1] > 0) {
+      cuda_release_scratch_index(m_scratch_locks, threadid);
+    }
+  }
+
+  __device__ inline void run(SHMEMReductionTag&, const int& threadid) const {
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
+                                                   sizeof(size_type)>
+        word_count(Analysis::value_size(
+                       ReducerConditional::select(m_functor, m_reducer)) /
+                   sizeof(size_type));
+
+    reference_type value =
+        final_reducer.init(kokkos_impl_cuda_shared_memory<size_type>() +
+                           threadIdx.y * word_count.value);
+
+    // Iterate this block through the league
+    const int int_league_size = (int)m_league_size;
+    for (int league_rank = blockIdx.x; league_rank < int_league_size;
+         league_rank += gridDim.x) {
+      this->template exec_team<WorkTag>(
+          Member(kokkos_impl_cuda_shared_memory<char>() + m_team_begin,
+                 m_shmem_begin, m_shmem_size,
+                 (void*)(((char*)m_scratch_ptr[1]) +
+                         ptrdiff_t(threadid / (blockDim.x * blockDim.y)) *
+                             m_scratch_size[1]),
+                 m_scratch_size[1], league_rank, m_league_size),
+          value);
+    }
+
+    // Reduce with final value at blockDim.y - 1 location.
+    bool zero_length        = m_league_size == 0;
+    bool do_final_reduction = true;
+    if (!zero_length)
+      do_final_reduction = cuda_single_inter_block_reduce_scan<false>(
+          final_reducer, blockIdx.x, gridDim.x,
+          kokkos_impl_cuda_shared_memory<size_type>(), m_scratch_space,
+          m_scratch_flags);
+
+    if (do_final_reduction) {
+      // This is the final block with the final result at the final threads'
+      // location
+
+      size_type* const shared = kokkos_impl_cuda_shared_memory<size_type>() +
+                                (blockDim.y - 1) * word_count.value;
+      size_type* const global =
+          m_result_ptr_device_accessible
+              ? reinterpret_cast<size_type*>(m_result_ptr)
+              : (m_unified_space ? m_unified_space : m_scratch_space);
+
+      if (threadIdx.y == 0) {
+        final_reducer.final(reinterpret_cast<value_type*>(shared));
+      }
+
+      if (CudaTraits::WarpSize < word_count.value) {
+        __syncthreads();
+      }
+
+      for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
+        global[i] = shared[i];
+      }
+    }
+  }
+
+  __device__ inline void run(ShflReductionTag, const int& threadid) const {
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    value_type value;
+    final_reducer.init(&value);
+
+    // Iterate this block through the league
+    const int int_league_size = (int)m_league_size;
+    for (int league_rank = blockIdx.x; league_rank < int_league_size;
+         league_rank += gridDim.x) {
+      this->template exec_team<WorkTag>(
+          Member(kokkos_impl_cuda_shared_memory<char>() + m_team_begin,
+                 m_shmem_begin, m_shmem_size,
+                 (void*)(((char*)m_scratch_ptr[1]) +
+                         ptrdiff_t(threadid / (blockDim.x * blockDim.y)) *
+                             m_scratch_size[1]),
+                 m_scratch_size[1], league_rank, m_league_size),
+          value);
+    }
+
+    pointer_type const result =
+        m_result_ptr_device_accessible
+            ? m_result_ptr
+            : (pointer_type)(m_unified_space ? m_unified_space
+                                             : m_scratch_space);
+
+    value_type init;
+    final_reducer.init(&init);
+
+    if (int_league_size == 0) {
+      final_reducer.final(&value);
+      *result = value;
+    } else if (Impl::cuda_inter_block_reduction(value, init, final_reducer,
+                                                m_scratch_space, result,
+                                                m_scratch_flags, blockDim.y)) {
+      const unsigned id = threadIdx.y * blockDim.x + threadIdx.x;
+      if (id == 0) {
+        final_reducer.final(&value);
+        *result = value;
+      }
+    }
+  }
+
+  inline void execute() {
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    const bool is_empty_range  = m_league_size == 0 || m_team_size == 0;
+    const bool need_device_set = Analysis::has_init_member_function ||
+                                 Analysis::has_final_member_function ||
+                                 !m_result_ptr_host_accessible ||
+#ifdef KOKKOS_CUDA_ENABLE_GRAPHS
+                                 Policy::is_graph_kernel::value ||
+#endif
+                                 !std::is_same<ReducerType, InvalidType>::value;
+    if (!is_empty_range || need_device_set) {
+      const int block_count = std::max(
+          1u, UseShflReduction ? std::min(m_league_size, size_type(1024 * 32))
+                               : std::min(int(m_league_size), m_team_size));
+
+      m_scratch_space = cuda_internal_scratch_space(
+          m_policy.space(), Analysis::value_size(ReducerConditional::select(
+                                m_functor, m_reducer)) *
+                                block_count);
+      m_scratch_flags =
+          cuda_internal_scratch_flags(m_policy.space(), sizeof(size_type));
+      m_unified_space = cuda_internal_scratch_unified(
+          m_policy.space(), Analysis::value_size(ReducerConditional::select(
+                                m_functor, m_reducer)));
+
+      dim3 block(m_vector_size, m_team_size, 1);
+      dim3 grid(block_count, 1, 1);
+      const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size;
+
+      if (is_empty_range
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
+          || Kokkos::Impl::CudaInternal::cuda_use_serial_execution()
+#endif
+      ) {
+        block = dim3(1, 1, 1);
+        grid  = dim3(1, 1, 1);
+      }
+
+      CudaParallelLaunch<ParallelReduce, LaunchBounds>(
+          *this, grid, block, shmem_size_total,
+          m_policy.space().impl_internal_space_instance(),
+          true);  // copy to device and execute
+
+      if (!m_result_ptr_device_accessible) {
+        m_policy.space().fence(
+            "Kokkos::Impl::ParallelReduce<Cuda, TeamPolicy>::execute: Result "
+            "Not Device Accessible");
+
+        if (m_result_ptr) {
+          if (m_unified_space) {
+            const int count = Analysis::value_count(
+                ReducerConditional::select(m_functor, m_reducer));
+            for (int i = 0; i < count; ++i) {
+              m_result_ptr[i] = pointer_type(m_unified_space)[i];
+            }
+          } else {
+            const int size = Analysis::value_size(
+                ReducerConditional::select(m_functor, m_reducer));
+            DeepCopy<HostSpace, CudaSpace>(m_result_ptr, m_scratch_space, size);
+          }
+        }
+      }
+    } else {
+      if (m_result_ptr) {
+        // TODO @graph We need to effectively insert this in to the graph
+        final_reducer.init(m_result_ptr);
+      }
+    }
+  }
+
+  template <class ViewType>
+  ParallelReduce(
+      const FunctorType& arg_functor, const Policy& arg_policy,
+      const ViewType& arg_result,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result.data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::CudaSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_result_ptr_host_accessible(
+            MemorySpaceAccess<Kokkos::HostSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_scratch_space(nullptr),
+        m_scratch_flags(nullptr),
+        m_unified_space(nullptr),
+        m_team_begin(0),
+        m_shmem_begin(0),
+        m_shmem_size(0),
+        m_scratch_ptr{nullptr, nullptr},
+        m_league_size(arg_policy.league_size()),
+        m_team_size(arg_policy.team_size()),
+        m_vector_size(arg_policy.impl_vector_length()) {
+    cudaFuncAttributes attr =
+        CudaParallelLaunch<ParallelReduce,
+                           LaunchBounds>::get_cuda_func_attributes();
+    m_team_size =
+        m_team_size >= 0
+            ? m_team_size
+            : Kokkos::Impl::cuda_get_opt_block_size<FunctorType, LaunchBounds>(
+                  m_policy.space().impl_internal_space_instance(), attr,
+                  m_functor, m_vector_size, m_policy.team_scratch_size(0),
+                  m_policy.thread_scratch_size(0)) /
+                  m_vector_size;
+
+    m_team_begin =
+        UseShflReduction
+            ? 0
+            : cuda_single_inter_block_reduce_scan_shmem<false, FunctorType,
+                                                        WorkTag>(arg_functor,
+                                                                 m_team_size);
+    m_shmem_begin = sizeof(double) * (m_team_size + 2);
+    m_shmem_size =
+        m_policy.scratch_size(0, m_team_size) +
+        FunctorTeamShmemSize<FunctorType>::value(arg_functor, m_team_size);
+    m_scratch_size[0] = m_shmem_size;
+    m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+    m_scratch_locks =
+        m_policy.space().impl_internal_space_instance()->m_scratch_locks;
+    if (m_team_size <= 0) {
+      m_scratch_ptr[1] = nullptr;
+    } else {
+      auto scratch_ptr_id =
+          m_policy.space()
+              .impl_internal_space_instance()
+              ->resize_team_scratch_space(
+                  static_cast<std::int64_t>(m_scratch_size[1]) *
+                  (std::min(
+                      static_cast<std::int64_t>(Cuda::concurrency() /
+                                                (m_team_size * m_vector_size)),
+                      static_cast<std::int64_t>(m_league_size))));
+      m_scratch_ptr[1]  = scratch_ptr_id.first;
+      m_scratch_pool_id = scratch_ptr_id.second;
+    }
+
+    // The global parallel_reduce does not support a vector_length other than
+    // 1 at the moment.
+    if ((arg_policy.impl_vector_length() > 1) && !UseShflReduction)
+      Impl::throw_runtime_exception(
+          "Kokkos::parallel_reduce with a TeamPolicy using a vector length of "
+          "greater than 1 is not currently supported for CUDA for dynamic "
+          "sized reduction types.");
+
+    if ((m_team_size < 32) && !UseShflReduction)
+      Impl::throw_runtime_exception(
+          "Kokkos::parallel_reduce with a TeamPolicy using a team_size smaller "
+          "than 32 is not currently supported with CUDA for dynamic sized "
+          "reduction types.");
+
+    // Functor's reduce memory, team scan memory, and team shared memory depend
+    // upon team size.
+
+    const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size;
+
+    if (!Kokkos::Impl::is_integral_power_of_two(m_team_size) &&
+        !UseShflReduction) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< Cuda > bad team size"));
+    }
+
+    if (m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
+        shmem_size_total) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< Cuda > requested too much "
+                      "L0 scratch memory"));
+    }
+
+    if (int(m_team_size) >
+        arg_policy.team_size_max(m_functor, m_reducer, ParallelReduceTag())) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< Cuda > requested too "
+                      "large team size."));
+    }
+  }
+
+  ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
+                 const ReducerType& reducer)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::CudaSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_result_ptr_host_accessible(
+            MemorySpaceAccess<Kokkos::HostSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_scratch_space(nullptr),
+        m_scratch_flags(nullptr),
+        m_unified_space(nullptr),
+        m_team_begin(0),
+        m_shmem_begin(0),
+        m_shmem_size(0),
+        m_scratch_ptr{nullptr, nullptr},
+        m_league_size(arg_policy.league_size()),
+        m_team_size(arg_policy.team_size()),
+        m_vector_size(arg_policy.impl_vector_length()) {
+    cudaFuncAttributes attr =
+        CudaParallelLaunch<ParallelReduce,
+                           LaunchBounds>::get_cuda_func_attributes();
+
+    // If a valid team size was not provided, deduce the team size
+    m_team_size =
+        m_team_size >= 0
+            ? m_team_size
+            : Kokkos::Impl::cuda_get_opt_block_size<FunctorType, LaunchBounds>(
+                  m_policy.space().impl_internal_space_instance(), attr,
+                  m_functor, m_vector_size, m_policy.team_scratch_size(0),
+                  m_policy.thread_scratch_size(0)) /
+                  m_vector_size;
+
+    m_team_begin =
+        UseShflReduction
+            ? 0
+            : cuda_single_inter_block_reduce_scan_shmem<false, FunctorType,
+                                                        WorkTag>(arg_functor,
+                                                                 m_team_size);
+    m_shmem_begin = sizeof(double) * (m_team_size + 2);
+    m_shmem_size =
+        m_policy.scratch_size(0, m_team_size) +
+        FunctorTeamShmemSize<FunctorType>::value(arg_functor, m_team_size);
+    m_scratch_size[0] = m_shmem_size;
+    m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+    m_scratch_locks =
+        m_policy.space().impl_internal_space_instance()->m_scratch_locks;
+    if (m_team_size <= 0) {
+      m_scratch_ptr[1] = nullptr;
+    } else {
+      auto scratch_ptr_id =
+          m_policy.space()
+              .impl_internal_space_instance()
+              ->resize_team_scratch_space(
+                  static_cast<std::int64_t>(m_scratch_size[1]) *
+                  (std::min(
+                      static_cast<std::int64_t>(Cuda::concurrency() /
+                                                (m_team_size * m_vector_size)),
+                      static_cast<std::int64_t>(m_league_size))));
+      m_scratch_ptr[1]  = scratch_ptr_id.first;
+      m_scratch_pool_id = scratch_ptr_id.second;
+    }
+
+    // The global parallel_reduce does not support a vector_length other than
+    // 1 at the moment.
+    if ((arg_policy.impl_vector_length() > 1) && !UseShflReduction)
+      Impl::throw_runtime_exception(
+          "Kokkos::parallel_reduce with a TeamPolicy using a vector length of "
+          "greater than 1 is not currently supported for CUDA for dynamic "
+          "sized reduction types.");
+
+    if ((m_team_size < 32) && !UseShflReduction)
+      Impl::throw_runtime_exception(
+          "Kokkos::parallel_reduce with a TeamPolicy using a team_size smaller "
+          "than 32 is not currently supported with CUDA for dynamic sized "
+          "reduction types.");
+
+    // Functor's reduce memory, team scan memory, and team shared memory depend
+    // upon team size.
+
+    const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size;
+
+    if ((!Kokkos::Impl::is_integral_power_of_two(m_team_size) &&
+         !UseShflReduction) ||
+        m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
+            shmem_size_total) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< Cuda > bad team size"));
+    }
+
+    size_type team_size_max =
+        Kokkos::Impl::cuda_get_max_block_size<FunctorType, LaunchBounds>(
+            m_policy.space().impl_internal_space_instance(), attr, m_functor,
+            m_vector_size, m_policy.team_scratch_size(0),
+            m_policy.thread_scratch_size(0)) /
+        m_vector_size;
+
+    if ((int)m_team_size > (int)team_size_max) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< Cuda > requested too "
+                      "large team size."));
+    }
+  }
+
+  ~ParallelReduce() {
+    if (m_scratch_pool_id >= 0) {
+      m_policy.space()
+          .impl_internal_space_instance()
+          ->m_team_scratch_pool[m_scratch_pool_id] = 0;
+    }
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+#endif /* defined(KOKKOS_ENABLE_CUDA) */
+#endif /* #ifndef KOKKOS_CUDA_PARALLEL_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_ReduceScan.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_ReduceScan.hpp
new file mode 100644 (file)
index 0000000..078315b
--- /dev/null
@@ -0,0 +1,753 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_REDUCESCAN_HPP
+#define KOKKOS_CUDA_REDUCESCAN_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_CUDA)
+
+#include <utility>
+
+#include <Kokkos_Parallel.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <Cuda/Kokkos_Cuda_Vectorization.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+/*
+ *  Algorithmic constraints:
+ *   (a) threads with same threadIdx.y have same value
+ *   (b) blockDim.x == power of two
+ *   (c) blockDim.z == 1
+ */
+
+template <class ValueType, class ReducerType>
+__device__ inline void cuda_intra_warp_reduction(
+    ValueType& result, const ReducerType& reducer,
+    const uint32_t max_active_thread = blockDim.y) {
+  unsigned int shift = 1;
+
+  // Reduce over values from threads with different threadIdx.y
+  while (blockDim.x * shift < 32) {
+    const ValueType tmp = shfl_down(result, blockDim.x * shift, 32u);
+    // Only join if the upper thread is active (this allows a non-power-of-two
+    // blockDim.y)
+    if (threadIdx.y + shift < max_active_thread) reducer.join(&result, &tmp);
+    shift *= 2;
+  }
+
+  result = shfl(result, 0, 32);
+}
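+
+// Illustrative trace (assuming blockDim.x == 1 and 32 active lanes): the loop
+// above issues shfl_down with offsets 1, 2, 4, 8 and 16, so after the final
+// join lane 0 holds the reduction over all 32 values, and the closing shfl
+// broadcasts lane 0's result to every lane of the warp.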
+
+template <class ValueType, class ReducerType>
+__device__ inline void cuda_inter_warp_reduction(
+    ValueType& value, const ReducerType& reducer,
+    const int max_active_thread = blockDim.y) {
+#define STEP_WIDTH 4
+  // Depending on the ValueType, __shared__ memory must be aligned up to
+  // 8-byte boundaries. The reason not to use ValueType directly is that for
+  // types with constructors it could lead to race conditions.
+  alignas(alignof(ValueType) > alignof(double) ? alignof(ValueType)
+                                               : alignof(double))
+      __shared__ double sh_result[(sizeof(ValueType) + 7) / 8 * STEP_WIDTH];
+  ValueType* result = (ValueType*)&sh_result;
+  const int step    = 32 / blockDim.x;
+  int shift         = STEP_WIDTH;
+  const int id      = threadIdx.y % step == 0 ? threadIdx.y / step : 65000;
+  if (id < STEP_WIDTH) {
+    result[id] = value;
+  }
+  __syncthreads();
+  while (shift <= max_active_thread / step) {
+    if (shift <= id && shift + STEP_WIDTH > id && threadIdx.x == 0) {
+      reducer.join(&result[id % STEP_WIDTH], &value);
+    }
+    __syncthreads();
+    shift += STEP_WIDTH;
+  }
+
+  value = result[0];
+  for (int i = 1; (i * step < max_active_thread) && i < STEP_WIDTH; i++)
+    reducer.join(&value, &result[i]);
+}
+
+template <class ValueType, class ReducerType>
+__device__ inline void cuda_intra_block_reduction(
+    ValueType& value, const ReducerType& reducer,
+    const int max_active_thread = blockDim.y) {
+  cuda_intra_warp_reduction(value, reducer, max_active_thread);
+  cuda_inter_warp_reduction(value, reducer, max_active_thread);
+}
+
+template <class FunctorType>
+__device__ bool cuda_inter_block_reduction(
+    typename FunctorType::reference_type value,
+    typename FunctorType::reference_type neutral, const FunctorType& reducer,
+    Cuda::size_type* const m_scratch_space,
+    typename FunctorType::pointer_type const /*result*/,
+    Cuda::size_type* const m_scratch_flags,
+    const int max_active_thread = blockDim.y) {
+  using pointer_type = typename FunctorType::pointer_type;
+  using value_type   = typename FunctorType::value_type;
+
+  // Do the intra-block reduction with shfl operations and static shared memory
+  cuda_intra_block_reduction(value, reducer, max_active_thread);
+
+  const int id = threadIdx.y * blockDim.x + threadIdx.x;
+
+  // One thread in the block writes block result to global scratch_memory
+  if (id == 0) {
+    pointer_type global = ((pointer_type)m_scratch_space) + blockIdx.x;
+    *global             = value;
+  }
+
+  // One warp of last block performs inter block reduction through loading the
+  // block values from global scratch_memory
+  bool last_block = false;
+  __threadfence();
+  __syncthreads();
+  if (id < 32) {
+    Cuda::size_type count;
+
+    // Figure out whether this is the last block
+    if (id == 0) count = Kokkos::atomic_fetch_add(m_scratch_flags, 1);
+    count = Kokkos::shfl(count, 0, 32);
+
+    // Last block does the inter block reduction
+    if (count == gridDim.x - 1) {
+      // set flag back to zero
+      if (id == 0) *m_scratch_flags = 0;
+      last_block = true;
+      value      = neutral;
+
+      pointer_type const volatile global = (pointer_type)m_scratch_space;
+
+      // Reduce all global values with splitting work over threads in one warp
+      const int step_size =
+          blockDim.x * blockDim.y < 32 ? blockDim.x * blockDim.y : 32;
+      for (int i = id; i < (int)gridDim.x; i += step_size) {
+        value_type tmp = global[i];
+        reducer.join(&value, &tmp);
+      }
+
+      // Perform shfl reductions within the warp; only join if the
+      // contribution is valid (allows gridDim.x to be a non-power-of-two
+      // and < 32)
+      if (int(blockDim.x * blockDim.y) > 1) {
+        value_type tmp = Kokkos::shfl_down(value, 1, 32);
+        if (id + 1 < int(gridDim.x)) reducer.join(&value, &tmp);
+      }
+      unsigned int mask = __activemask();
+      __syncwarp(mask);
+      if (int(blockDim.x * blockDim.y) > 2) {
+        value_type tmp = Kokkos::shfl_down(value, 2, 32);
+        if (id + 2 < int(gridDim.x)) reducer.join(&value, &tmp);
+      }
+      __syncwarp(mask);
+      if (int(blockDim.x * blockDim.y) > 4) {
+        value_type tmp = Kokkos::shfl_down(value, 4, 32);
+        if (id + 4 < int(gridDim.x)) reducer.join(&value, &tmp);
+      }
+      __syncwarp(mask);
+      if (int(blockDim.x * blockDim.y) > 8) {
+        value_type tmp = Kokkos::shfl_down(value, 8, 32);
+        if (id + 8 < int(gridDim.x)) reducer.join(&value, &tmp);
+      }
+      __syncwarp(mask);
+      if (int(blockDim.x * blockDim.y) > 16) {
+        value_type tmp = Kokkos::shfl_down(value, 16, 32);
+        if (id + 16 < int(gridDim.x)) reducer.join(&value, &tmp);
+      }
+      __syncwarp(mask);
+    }
+  }
+  // The last block has in its thread=0 the global reduction value through
+  // "value"
+  return last_block;
+}
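+
+// Illustrative trace (assumed sizes): with gridDim.x == 100 and a full warp
+// (step_size == 32), lanes 0..31 of the last block each accumulate the
+// partials global[id], global[id + 32], global[id + 64], ...; the five
+// guarded shfl_down steps then fold the 32 lane partials so that lane 0 ends
+// up with the reduction over all 100 block results.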
+
+template <class FunctorType, bool DoScan, bool UseShfl>
+struct CudaReductionsFunctor;
+
+template <class FunctorType>
+struct CudaReductionsFunctor<FunctorType, false, true> {
+  using pointer_type = typename FunctorType::pointer_type;
+  using Scalar       = typename FunctorType::value_type;
+
+  __device__ static inline void scalar_intra_warp_reduction(
+      const FunctorType& functor,
+      Scalar value,            // Contribution
+      const bool skip_vector,  // Skip threads if Kokkos vector lanes are not
+                               // part of the reduction
+      const int width,         // How much of the warp participates
+      Scalar& result) {
+    unsigned mask =
+        width == 32
+            ? 0xffffffff
+            : ((1 << width) - 1)
+                  << ((threadIdx.y * blockDim.x + threadIdx.x) / width) * width;
+    for (int delta = skip_vector ? blockDim.x : 1; delta < width; delta *= 2) {
+      Scalar tmp = Kokkos::shfl_down(value, delta, width, mask);
+      functor.join(&value, &tmp);
+    }
+
+    Impl::in_place_shfl(result, value, 0, width, mask);
+  }
+
+  __device__ static inline void scalar_intra_block_reduction(
+      const FunctorType& functor, Scalar value, const bool skip,
+      Scalar* my_global_team_buffer_element, const int shared_elements,
+      Scalar* shared_team_buffer_element) {
+    const int warp_id = (threadIdx.y * blockDim.x) / 32;
+    Scalar* const my_shared_team_buffer_element =
+        shared_team_buffer_element + warp_id % shared_elements;
+
+    // Warp Level Reduction, ignoring Kokkos vector entries
+    scalar_intra_warp_reduction(functor, value, skip, 32, value);
+
+    if (warp_id < shared_elements) {
+      *my_shared_team_buffer_element = value;
+    }
+    // Wait for every warp to be done before using one warp to do the final
+    // cross-warp reduction
+    __syncthreads();
+
+    const int num_warps = blockDim.x * blockDim.y / 32;
+    for (int w = shared_elements; w < num_warps; w += shared_elements) {
+      if (warp_id >= w && warp_id < w + shared_elements) {
+        if ((threadIdx.y * blockDim.x + threadIdx.x) % 32 == 0)
+          functor.join(my_shared_team_buffer_element, &value);
+      }
+      __syncthreads();
+    }
+
+    if (warp_id == 0) {
+      functor.init(&value);
+      for (unsigned int i = threadIdx.y * blockDim.x + threadIdx.x;
+           i < blockDim.y * blockDim.x / 32; i += 32)
+        functor.join(&value, &shared_team_buffer_element[i]);
+      scalar_intra_warp_reduction(functor, value, false, 32,
+                                  *my_global_team_buffer_element);
+    }
+  }
+
+  __device__ static inline bool scalar_inter_block_reduction(
+      const FunctorType& functor, const Cuda::size_type /*block_id*/,
+      const Cuda::size_type block_count, Cuda::size_type* const shared_data,
+      Cuda::size_type* const global_data, Cuda::size_type* const global_flags) {
+    Scalar* const global_team_buffer_element = ((Scalar*)global_data);
+    Scalar* const my_global_team_buffer_element =
+        global_team_buffer_element + blockIdx.x;
+    Scalar* shared_team_buffer_elements = ((Scalar*)shared_data);
+    Scalar value        = shared_team_buffer_elements[threadIdx.y];
+    int shared_elements = blockDim.x * blockDim.y / 32;
+    int global_elements = block_count;
+    __syncthreads();
+
+    scalar_intra_block_reduction(functor, value, true,
+                                 my_global_team_buffer_element, shared_elements,
+                                 shared_team_buffer_elements);
+    __threadfence();
+    __syncthreads();
+    unsigned int num_teams_done = 0;
+    // The cast in the atomic call is necessary to find a matching call with
+    // MSVC/NVCC
+    if (threadIdx.x + threadIdx.y == 0) {
+      num_teams_done =
+          Kokkos::atomic_fetch_add(global_flags, static_cast<unsigned int>(1)) +
+          1;
+    }
+    bool is_last_block = false;
+    if (__syncthreads_or(num_teams_done == gridDim.x)) {
+      is_last_block = true;
+      *global_flags = 0;
+      functor.init(&value);
+      for (int i = threadIdx.y * blockDim.x + threadIdx.x; i < global_elements;
+           i += blockDim.x * blockDim.y) {
+        functor.join(&value, &global_team_buffer_element[i]);
+      }
+      scalar_intra_block_reduction(
+          functor, value, false, shared_team_buffer_elements + (blockDim.y - 1),
+          shared_elements, shared_team_buffer_elements);
+    }
+    return is_last_block;
+  }
+};
+
+template <class FunctorType>
+struct CudaReductionsFunctor<FunctorType, false, false> {
+  using pointer_type = typename FunctorType::pointer_type;
+  using Scalar       = typename FunctorType::value_type;
+
+  __device__ static inline void scalar_intra_warp_reduction(
+      const FunctorType& functor,
+      Scalar* value,           // Contribution
+      const bool skip_vector,  // Skip threads if Kokkos vector lanes are not
+                               // part of the reduction
+      const int width)         // How much of the warp participates
+  {
+    unsigned mask =
+        width == 32
+            ? 0xffffffff
+            : ((1 << width) - 1)
+                  << ((threadIdx.y * blockDim.x + threadIdx.x) / width) * width;
+    const int lane_id = (threadIdx.y * blockDim.x + threadIdx.x) % 32;
+
+    __syncwarp(mask);
+
+    for (int delta = skip_vector ? blockDim.x : 1; delta < width; delta *= 2) {
+      if (lane_id + delta < 32) {
+        functor.join(value, value + delta);
+      }
+      __syncwarp(mask);
+    }
+    if (lane_id != 0) {
+      *value = *(value - lane_id);
+    }
+  }
+
+  __device__ static inline void scalar_intra_block_reduction(
+      const FunctorType& functor, Scalar value, const bool skip, Scalar* result,
+      const int /*shared_elements*/, Scalar* shared_team_buffer_element) {
+    const int warp_id = (threadIdx.y * blockDim.x) / 32;
+    Scalar* const my_shared_team_buffer_element =
+        shared_team_buffer_element + threadIdx.y * blockDim.x + threadIdx.x;
+    *my_shared_team_buffer_element = value;
+    // Warp Level Reduction, ignoring Kokkos vector entries
+    scalar_intra_warp_reduction(functor, my_shared_team_buffer_element, skip,
+                                32);
+    // Wait for every warp to be done before using one warp to do the final
+    // cross-warp reduction
+    __syncthreads();
+
+    if (warp_id == 0) {
+      const unsigned int delta = (threadIdx.y * blockDim.x + threadIdx.x) * 32;
+      if (delta < blockDim.x * blockDim.y)
+        *my_shared_team_buffer_element = shared_team_buffer_element[delta];
+      __syncwarp(0xffffffff);
+      scalar_intra_warp_reduction(functor, my_shared_team_buffer_element, false,
+                                  blockDim.x * blockDim.y / 32);
+      if (threadIdx.x + threadIdx.y == 0) *result = *shared_team_buffer_element;
+    }
+  }
+
+  template <class SizeType = Cuda::size_type>
+  __device__ static inline bool scalar_inter_block_reduction(
+      const FunctorType& functor, const Cuda::size_type /*block_id*/,
+      const Cuda::size_type block_count, SizeType* const shared_data,
+      SizeType* const global_data, Cuda::size_type* const global_flags) {
+    Scalar* const global_team_buffer_element = ((Scalar*)global_data);
+    Scalar* const my_global_team_buffer_element =
+        global_team_buffer_element + blockIdx.x;
+    Scalar* shared_team_buffer_elements = ((Scalar*)shared_data);
+    Scalar value        = shared_team_buffer_elements[threadIdx.y];
+    int shared_elements = blockDim.x * blockDim.y / 32;
+    int global_elements = block_count;
+    __syncthreads();
+
+    scalar_intra_block_reduction(functor, value, true,
+                                 my_global_team_buffer_element, shared_elements,
+                                 shared_team_buffer_elements);
+    __threadfence();
+    __syncthreads();
+
+    unsigned int num_teams_done = 0;
+    // The cast in the atomic call is necessary to find a matching call with
+    // MSVC/NVCC
+    if (threadIdx.x + threadIdx.y == 0) {
+      num_teams_done =
+          Kokkos::atomic_fetch_add(global_flags, static_cast<unsigned int>(1)) +
+          1;
+    }
+    bool is_last_block = false;
+    if (__syncthreads_or(num_teams_done == gridDim.x)) {
+      is_last_block = true;
+      *global_flags = 0;
+      functor.init(&value);
+      for (int i = threadIdx.y * blockDim.x + threadIdx.x; i < global_elements;
+           i += blockDim.x * blockDim.y) {
+        functor.join(&value, &global_team_buffer_element[i]);
+      }
+      scalar_intra_block_reduction(
+          functor, value, false, shared_team_buffer_elements + (blockDim.y - 1),
+          shared_elements, shared_team_buffer_elements);
+    }
+    return is_last_block;
+  }
+};
+//----------------------------------------------------------------------------
+// See section B.17 of Cuda C Programming Guide Version 3.2
+// for discussion of
+//   __launch_bounds__(maxThreadsPerBlock,minBlocksPerMultiprocessor)
+// function qualifier which could be used to improve performance.
+//----------------------------------------------------------------------------
+// Maximize shared memory and minimize L1 cache:
+//   cudaFuncSetCacheConfig(MyKernel, cudaFuncCachePreferShared );
+// For 2.0 capability: 48 KB shared and 16 KB L1
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+/*
+ *  Algorithmic constraints:
+ *   (a) blockDim.y <= 1024
+ *   (b) blockDim.x == blockDim.z == 1
+ */
+
+template <bool DoScan, class FunctorType>
+__device__ void cuda_intra_block_reduce_scan(
+    const FunctorType& functor,
+    const typename FunctorType::pointer_type base_data) {
+  using pointer_type = typename FunctorType::pointer_type;
+
+  const unsigned value_count = functor.length();
+  const unsigned not_less_power_of_two =
+      (1 << (Impl::int_log2(blockDim.y - 1) + 1));
+  const unsigned BlockSizeMask = not_less_power_of_two - 1;
+  // There is at most one warp that is neither completely full nor empty.
+  // For that warp, we shift all indices logically to the end and ignore join
+  // operations with unassigned indices in the warp when performing the intra
+  // warp reduction/scan.
+  const bool is_full_warp = (((threadIdx.y >> CudaTraits::WarpIndexShift) + 1)
+                             << CudaTraits::WarpIndexShift) <= blockDim.y;
+
+  const unsigned mapped_idx =
+      threadIdx.y + (is_full_warp ? 0
+                                  : (not_less_power_of_two - blockDim.y) &
+                                        (CudaTraits::WarpSize - 1));
+  const pointer_type tdata_intra = base_data + value_count * threadIdx.y;
+  const pointer_type warp_start =
+      base_data + value_count * ((threadIdx.y >> CudaTraits::WarpIndexShift)
+                                 << CudaTraits::WarpIndexShift);
+
+  auto block_reduce_step = [&functor, value_count](
+                               int const R, pointer_type const TD, int const S,
+                               pointer_type memory_start, int index_shift) {
+    const auto join_ptr = TD - (value_count << S) + value_count * index_shift;
+    if (((R + 1) & ((1 << (S + 1)) - 1)) == 0 && join_ptr >= memory_start) {
+      functor.join(TD, join_ptr);
+    }
+  };
+
+  auto block_scan_step = [&functor, value_count](
+                             int const R, pointer_type const TD, int const S,
+                             pointer_type memory_start, int index_shift) {
+    const auto N        = (1 << (S + 1));
+    const auto join_ptr = TD - (value_count << S) + value_count * index_shift;
+    if (R >= N && ((R + 1) & (N - 1)) == (N >> 1) && join_ptr >= memory_start) {
+      functor.join(TD, join_ptr);
+    }
+  };
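+
+  // Worked example of the step predicates (ranks R within one 32-lane warp):
+  // block_reduce_step with S == 0 fires for R == 1, 3, 5, ... and joins rank
+  // R with rank R - 1; with S == 1 it fires for R == 3, 7, 11, ... joining R
+  // with R - 2 (the classic up-sweep). block_scan_step with S == 0 fires for
+  // R == 2, 4, 6, ... joining R with R - 1, and with S == 1 for
+  // R == 5, 9, 13, ... joining R with R - 2, filling in the partial prefixes
+  // the up-sweep skipped so each rank ends with an inclusive scan value.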
+
+  {  // Intra-warp reduction:
+    __syncwarp(0xffffffff);
+    block_reduce_step(mapped_idx, tdata_intra, 0, warp_start, 0);
+    __syncwarp(0xffffffff);
+    block_reduce_step(mapped_idx, tdata_intra, 1, warp_start, 0);
+    __syncwarp(0xffffffff);
+    block_reduce_step(mapped_idx, tdata_intra, 2, warp_start, 0);
+    __syncwarp(0xffffffff);
+    block_reduce_step(mapped_idx, tdata_intra, 3, warp_start, 0);
+    __syncwarp(0xffffffff);
+    block_reduce_step(mapped_idx, tdata_intra, 4, warp_start, 0);
+    __syncwarp(0xffffffff);
+  }
+
+  __syncthreads();  // Wait for all warps to reduce
+
+  // Inter-warp reduce-scan by a single warp to avoid extra synchronizations.
+  {
+    // There is at most one warp where the memory address to be used is not
+    // (CudaTraits::WarpSize - 1) away from the warp start address. For the
+    // following reduction, we shift all indices logically to the end of the
+    // next power of two greater than or equal to the number of warps.
+    const unsigned n_active_warps =
+        ((blockDim.y - 1) >> CudaTraits::WarpIndexShift) + 1;
+    const unsigned inner_mask =
+        __ballot_sync(0xffffffff, (threadIdx.y < n_active_warps));
+    if (threadIdx.y < n_active_warps) {
+      const bool is_full_warp_inter =
+          threadIdx.y < (blockDim.y >> CudaTraits::WarpIndexShift);
+      const pointer_type tdata_inter =
+          base_data +
+          value_count * (is_full_warp_inter
+                             ? (threadIdx.y << CudaTraits::WarpIndexShift) +
+                                   (CudaTraits::WarpSize - 1)
+                             : blockDim.y - 1);
+      const unsigned index_shift =
+          is_full_warp_inter
+              ? 0
+              : blockDim.y - (threadIdx.y << CudaTraits::WarpIndexShift);
+      const int rtid_inter = (threadIdx.y << CudaTraits::WarpIndexShift) +
+                             (CudaTraits::WarpSize - 1) - index_shift;
+
+      if ((1 << 5) < BlockSizeMask) {
+        __syncwarp(inner_mask);
+        block_reduce_step(rtid_inter, tdata_inter, 5, base_data, index_shift);
+      }
+      if ((1 << 6) < BlockSizeMask) {
+        __syncwarp(inner_mask);
+        block_reduce_step(rtid_inter, tdata_inter, 6, base_data, index_shift);
+      }
+      if ((1 << 7) < BlockSizeMask) {
+        __syncwarp(inner_mask);
+        block_reduce_step(rtid_inter, tdata_inter, 7, base_data, index_shift);
+      }
+      if ((1 << 8) < BlockSizeMask) {
+        __syncwarp(inner_mask);
+        block_reduce_step(rtid_inter, tdata_inter, 8, base_data, index_shift);
+      }
+      if ((1 << 9) < BlockSizeMask) {
+        __syncwarp(inner_mask);
+        block_reduce_step(rtid_inter, tdata_inter, 9, base_data, index_shift);
+      }
+
+      if (DoScan) {
+        __syncwarp(inner_mask);
+        block_scan_step(rtid_inter, tdata_inter, 8, base_data, index_shift);
+        __syncwarp(inner_mask);
+        block_scan_step(rtid_inter, tdata_inter, 7, base_data, index_shift);
+        __syncwarp(inner_mask);
+        block_scan_step(rtid_inter, tdata_inter, 6, base_data, index_shift);
+        __syncwarp(inner_mask);
+        block_scan_step(rtid_inter, tdata_inter, 5, base_data, index_shift);
+      }
+    }
+  }
+
+  __syncthreads();  // Wait for inter-warp reduce-scan to complete
+
+  if (DoScan) {
+    block_scan_step(mapped_idx, tdata_intra, 4, warp_start, 0);
+    __threadfence_block();
+    __syncwarp(0xffffffff);
+    block_scan_step(mapped_idx, tdata_intra, 3, warp_start, 0);
+    __threadfence_block();
+    __syncwarp(0xffffffff);
+    block_scan_step(mapped_idx, tdata_intra, 2, warp_start, 0);
+    __threadfence_block();
+    __syncwarp(0xffffffff);
+    block_scan_step(mapped_idx, tdata_intra, 1, warp_start, 0);
+    __threadfence_block();
+    __syncwarp(0xffffffff);
+    block_scan_step(mapped_idx, tdata_intra, 0, warp_start, 0);
+    __threadfence_block();
+    __syncwarp(0xffffffff);
+    // Update with total from previous warps
+    if (mapped_idx >= CudaTraits::WarpSize &&
+        (mapped_idx & (CudaTraits::WarpSize - 1)) != (CudaTraits::WarpSize - 1))
+      functor.join(tdata_intra, warp_start - value_count);
+    __syncwarp(0xffffffff);
+  }
+}
+
+//----------------------------------------------------------------------------
+/**\brief  Input value-per-thread starting at 'shared_data'.
+ *         Reduction value at last thread's location.
+ *
+ *  If 'DoScan' then write blocks' scan values and block-groups' scan values.
+ *
+ *  The global reduce result is in the last thread's 'shared_data' location.
+ */
+
+template <bool DoScan, class FunctorType, class SizeType = Cuda::size_type>
+__device__ bool cuda_single_inter_block_reduce_scan2(
+    const FunctorType& functor, const Cuda::size_type block_id,
+    const Cuda::size_type block_count, SizeType* const shared_data,
+    SizeType* const global_data, Cuda::size_type* const global_flags) {
+  using size_type    = SizeType;
+  using value_type   = typename FunctorType::value_type;
+  using pointer_type = typename FunctorType::pointer_type;
+
+  // '__ffs' = position of the least significant bit set to 1.
+  // 'blockDim.y' is guaranteed to be a power of two so this
+  // is the integral shift value that can replace an integral divide.
+  const unsigned BlockSizeShift = __ffs(blockDim.y) - 1;
+  const unsigned BlockSizeMask  = blockDim.y - 1;
+
+  // Must have power of two thread count
+  if (BlockSizeMask & blockDim.y) {
+    Kokkos::abort(
+        "Cuda::cuda_single_inter_block_reduce_scan requires power-of-two "
+        "blockDim");
+  }
+
+  const integral_nonzero_constant<
+      size_type, std::is_pointer<typename FunctorType::reference_type>::value
+                     ? 0
+                     : sizeof(value_type) / sizeof(size_type)>
+      word_count((sizeof(value_type) * functor.length()) / sizeof(size_type));
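+  // Example of the word_count arithmetic (assumed types): for a reduction
+  // over a single double with size_type == unsigned int (4 bytes), value_type
+  // occupies 8 bytes, so word_count.value == 2 and each thread owns two
+  // 32-bit words of 'shared_data'.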
+
+  // Reduce the accumulation for the entire block.
+  cuda_intra_block_reduce_scan<false>(functor, pointer_type(shared_data));
+  {
+    // Write accumulation total to global scratch space.
+    // Accumulation total is the last thread's data.
+    size_type* const shared = shared_data + word_count.value * BlockSizeMask;
+    size_type* const global = global_data + word_count.value * block_id;
+
+    for (int i = int(threadIdx.y); i < int(word_count.value);
+         i += int(blockDim.y)) {
+      global[i] = shared[i];
+    }
+  }
+  __threadfence();
+
+  // Contributing blocks note that their contribution has been completed via
+  // an atomic-increment flag. If this block is not the last block to
+  // contribute to this group then the block is done.
+  const bool is_last_block = !__syncthreads_or(
+      threadIdx.y
+          ? 0
+          : (1 + atomicInc(global_flags, block_count - 1) < block_count));
+
+  if (is_last_block) {
+    const size_type b =
+        (long(block_count) * long(threadIdx.y)) >> BlockSizeShift;
+    const size_type e =
+        (long(block_count) * long(threadIdx.y + 1)) >> BlockSizeShift;
+
+    {
+      void* const shared_ptr = shared_data + word_count.value * threadIdx.y;
+      /* reference_type shared_value = */ functor.init(
+          static_cast<pointer_type>(shared_ptr));
+
+      for (size_type i = b; i < e; ++i) {
+        functor.join(
+            static_cast<pointer_type>(shared_ptr),
+            reinterpret_cast<pointer_type>(global_data + word_count.value * i));
+      }
+    }
+
+    cuda_intra_block_reduce_scan<DoScan>(functor, pointer_type(shared_data));
+
+    if (DoScan) {
+      pointer_type const shared_value = reinterpret_cast<pointer_type>(
+          shared_data +
+          word_count.value * (threadIdx.y ? threadIdx.y - 1 : blockDim.y));
+
+      if (!threadIdx.y) {
+        functor.init(shared_value);
+      }
+
+      // Join previous inclusive scan value to each member
+      for (size_type i = b; i < e; ++i) {
+        size_type* const global_value = global_data + word_count.value * i;
+        functor.join(shared_value,
+                     reinterpret_cast<pointer_type>(global_value));
+        functor.copy(reinterpret_cast<pointer_type>(global_value),
+                     reinterpret_cast<pointer_type>(shared_value));
+      }
+    }
+  }
+
+  return is_last_block;
+}
+
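+// Dispatch note for the function below: reductions over a fixed-size
+// (non-array) value type without a scan take the specialized
+// CudaReductionsFunctor path, the variant being selected by
+// sizeof(value_type); array reductions (pointer reference_type) and all
+// scans fall back to the generic cuda_single_inter_block_reduce_scan2 above.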
+template <bool DoScan, class FunctorType, class SizeType = Cuda::size_type>
+__device__ bool cuda_single_inter_block_reduce_scan(
+    const FunctorType& functor, const Cuda::size_type block_id,
+    const Cuda::size_type block_count, SizeType* const shared_data,
+    SizeType* const global_data, Cuda::size_type* const global_flags) {
+  if (!DoScan && !std::is_pointer<typename FunctorType::reference_type>::value)
+    return Kokkos::Impl::CudaReductionsFunctor<
+        FunctorType, false, (sizeof(typename FunctorType::value_type) > 16)>::
+        scalar_inter_block_reduction(functor, block_id, block_count,
+                                     shared_data, global_data, global_flags);
+  else
+    return cuda_single_inter_block_reduce_scan2<DoScan>(
+        functor, block_id, block_count, shared_data, global_data, global_flags);
+}
+
+// Size in bytes required for inter block reduce or scan
+template <bool DoScan, class FunctorType, class ArgTag>
+inline std::enable_if_t<DoScan, unsigned>
+cuda_single_inter_block_reduce_scan_shmem(const FunctorType& functor,
+                                          const unsigned BlockSize) {
+  using Analysis =
+      Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+                            RangePolicy<Cuda, ArgTag>, FunctorType>;
+
+  return (BlockSize + 2) * Analysis::value_size(functor);
+}
+
+template <bool DoScan, class FunctorType, class ArgTag>
+inline std::enable_if_t<!DoScan, unsigned>
+cuda_single_inter_block_reduce_scan_shmem(const FunctorType& functor,
+                                          const unsigned BlockSize) {
+  using Analysis =
+      Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                            RangePolicy<Cuda, ArgTag>, FunctorType>;
+
+  return (BlockSize + 2) * Analysis::value_size(functor);
+}
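+
+// Illustrative arithmetic (assumed values): for BlockSize == 128 and a
+// functor reducing a single 8-byte double, Analysis::value_size(functor) == 8,
+// so the reduce variant above requests (128 + 2) * 8 == 1040 bytes of shared
+// memory -- one slot per thread plus two extra bookkeeping slots.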
+
+template <typename WorkTag, typename Policy, typename FunctorType>
+inline void check_reduced_view_shmem_size(const Policy& policy,
+                                          const FunctorType& functor) {
+  size_t minBlockSize = CudaTraits::WarpSize * 1;
+  unsigned reqShmemSize =
+      cuda_single_inter_block_reduce_scan_shmem<false, FunctorType, WorkTag>(
+          functor, minBlockSize);
+  size_t maxShmemPerBlock =
+      policy.space().impl_internal_space_instance()->m_maxShmemPerBlock;
+
+  if (reqShmemSize > maxShmemPerBlock) {
+    Kokkos::Impl::throw_runtime_exception(
+        "Kokkos::Impl::ParallelReduce< Cuda > requested too much L0 scratch "
+        "memory");
+  }
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined(KOKKOS_ENABLE_CUDA) */
+#endif /* KOKKOS_CUDA_REDUCESCAN_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Task.cpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Task.cpp
new file mode 100644 (file)
index 0000000..8f05448
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core.hpp>
+
+#include <impl/Kokkos_TaskQueue_impl.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template class TaskQueue<
+    Kokkos::Cuda,
+    Impl::default_tasking_memory_space_for_execution_space_t<Kokkos::Cuda> >;
+template class TaskQueueMultiple<
+    Kokkos::Cuda,
+    Impl::default_tasking_memory_space_for_execution_space_t<Kokkos::Cuda> >;
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+#else
+void KOKKOS_CORE_SRC_CUDA_KOKKOS_CUDA_TASK_PREVENT_LINK_ERROR() {}
+#endif /* #if defined( KOKKOS_ENABLE_CUDA ) && defined( KOKKOS_ENABLE_TASKDAG \
+          ) */
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Task.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Task.hpp
new file mode 100644 (file)
index 0000000..1f2e394
--- /dev/null
@@ -0,0 +1,1241 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_CUDA_TASK_HPP
+#define KOKKOS_IMPL_CUDA_TASK_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <impl/Kokkos_TaskBase.hpp>
+#include <Cuda/Kokkos_Cuda_Error.hpp>  // KOKKOS_IMPL_CUDA_SAFE_CALL
+#include <impl/Kokkos_TaskTeamMember.hpp>
+
+//----------------------------------------------------------------------------
+
+#if defined(__CUDA_ARCH__)
+#define KOKKOS_IMPL_CUDA_SYNCWARP_OR_RETURN(MSG)                           \
+  {                                                                        \
+    __syncwarp();                                                          \
+    const unsigned b = __activemask();                                     \
+    if (b != 0xffffffff) {                                                 \
+      printf(" SYNCWARP AT %s (%d,%d,%d) (%d,%d,%d) failed %x\n", MSG,     \
+             blockIdx.x, blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y, \
+             threadIdx.z, b);                                              \
+      return;                                                              \
+    }                                                                      \
+  }
+#else
+#define KOKKOS_IMPL_CUDA_SYNCWARP_OR_RETURN(MSG)
+#endif
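+
+// Debugging aid: the macro above synchronizes the warp and, if the active
+// mask is not full afterwards, prints the offending block/thread coordinates
+// and returns from the enclosing device function, turning silent intra-warp
+// divergence into a visible diagnostic. On the host side it expands to
+// nothing.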
+
+namespace Kokkos {
+namespace Impl {
+namespace {
+
+template <typename TaskType>
+__global__ void set_cuda_task_base_apply_function_pointer(
+    typename TaskType::function_type* ptr,
+    typename TaskType::destroy_type* dtor) {
+  *ptr  = TaskType::apply;
+  *dtor = TaskType::destroy;
+}
+
+template <typename Scheduler>
+__global__ void cuda_task_queue_execute(Scheduler scheduler,
+                                        int32_t shmem_size) {
+  TaskQueueSpecialization<Scheduler>::driver(std::move(scheduler), shmem_size);
+}
+
+}  // namespace
+
+template <class, class>
+class TaskExec;
+
+template <class QueueType>
+class TaskQueueSpecialization<SimpleTaskScheduler<Kokkos::Cuda, QueueType>> {
+ public:
+  using scheduler_type  = SimpleTaskScheduler<Kokkos::Cuda, QueueType>;
+  using execution_space = Kokkos::Cuda;
+  using memory_space    = Kokkos::CudaUVMSpace;
+  using member_type     = TaskExec<Kokkos::Cuda, scheduler_type>;
+
+  enum : long { max_league_size = 16 };
+  enum : int { warps_per_block = 4 };
+
+  KOKKOS_INLINE_FUNCTION
+  static void iff_single_thread_recursive_execute(scheduler_type const&) {}
+
+  static int get_max_team_count(execution_space const&) {
+    return Kokkos::Impl::cuda_internal_multiprocessor_count() * warps_per_block;
+  }
+
+  __device__ static void driver(scheduler_type scheduler,
+                                int32_t shmem_per_warp) {
+    using queue_type     = typename scheduler_type::task_queue_type;
+    using task_base_type = typename scheduler_type::task_base_type;
+    using runnable_task_base_type =
+        typename scheduler_type::runnable_task_base_type;
+    using scheduling_info_storage_type = SchedulingInfoStorage<
+        runnable_task_base_type,
+        typename scheduler_type::task_scheduling_info_type>;
+
+    extern __shared__ int32_t shmem_all[];
+
+    int32_t* const warp_shmem =
+        shmem_all + (threadIdx.z * shmem_per_warp) / sizeof(int32_t);
+
+    task_base_type* const shared_memory_task_copy = (task_base_type*)warp_shmem;
+
+    const int warp_lane = threadIdx.x + threadIdx.y * blockDim.x;
+
+    member_type single_exec(scheduler, warp_shmem, 1);
+    member_type team_exec(scheduler, warp_shmem, blockDim.y);
+
+    auto& queue          = scheduler.queue();
+    auto& team_scheduler = team_exec.scheduler();
+
+    auto current_task = OptionalRef<task_base_type>();
+
+    // Loop until all queues are empty and no tasks in flight
+    while (!queue.is_done()) {
+      if (warp_lane == 0) {  // should be (?) same as team_exec.team_rank() == 0
+        // pop off a task
+        current_task =
+            queue.pop_ready_task(team_scheduler.team_scheduler_info());
+      }
+
+      // Broadcast task pointer:
+
+      // Sync before the broadcast
+      __syncwarp(0xffffffff);
+
+      // pretend it's an int* for shuffle purposes
+      ((int*)&current_task)[0] =
+          __shfl_sync(0xffffffff, ((int*)&current_task)[0], 0, 32);
+      ((int*)&current_task)[1] =
+          __shfl_sync(0xffffffff, ((int*)&current_task)[1], 0, 32);
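+      // (The 64-bit task pointer cannot be moved by a single 32-bit
+      // __shfl_sync, so its two halves are broadcast from lane 0 separately
+      // and reassembled bitwise in every lane.)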
+
+      if (current_task) {
+        KOKKOS_ASSERT(!current_task->as_runnable_task().get_respawn_flag());
+
+        int32_t b = sizeof(scheduling_info_storage_type) / sizeof(int32_t);
+        static_assert(
+            sizeof(scheduling_info_storage_type) % sizeof(int32_t) == 0,
+            "bad task size");
+        int32_t const e = current_task->get_allocation_size() / sizeof(int32_t);
+        KOKKOS_ASSERT(current_task->get_allocation_size() % sizeof(int32_t) ==
+                      0);
+
+        int32_t volatile* const task_mem =
+            (int32_t volatile*)current_task.get();
+
+        // do a coordinated copy of the task closure from global to shared
+        // memory:
+        for (int32_t i = warp_lane; i < e; i += CudaTraits::WarpSize) {
+          warp_shmem[i] = task_mem[i];
+        }
+
+        // Synchronize threads of the warp and ensure memory
+        // writes are visible to all threads in the warp.
+        __syncwarp(0xffffffff);
+
+        if (shared_memory_task_copy->is_team_runnable()) {
+          // Thread Team Task
+          shared_memory_task_copy->as_runnable_task().run(team_exec);
+        } else if (threadIdx.y == 0) {
+          // TODO @tasking @optimization DSH Change this to warp_lane == 0
+          // when we allow blockDim.x to be more than 1
+          // Single Thread Task
+          shared_memory_task_copy->as_runnable_task().run(single_exec);
+        }
+
+        // Synchronize threads of the warp and ensure memory
+        // writes are visible to all threads in the warp.
+
+        __syncwarp(0xffffffff);
+
+        // if(warp_lane < b % CudaTraits::WarpSize) b += CudaTraits::WarpSize;
+        // b -= b % CudaTraits::WarpSize;
+
+        // copy task closure from shared to global memory:
+        for (int32_t i = b + warp_lane; i < e; i += CudaTraits::WarpSize) {
+          task_mem[i] = warp_shmem[i];
+        }
+
+        // Synchronize threads of the warp and ensure memory
+        // writes are visible to the root thread of the warp for
+        // respawn or completion.
+
+        __syncwarp(0xffffffff);
+
+        if (warp_lane == 0) {
+          // If respawn requested copy respawn data back to main memory
+          if (shared_memory_task_copy->as_runnable_task().get_respawn_flag()) {
+            if (shared_memory_task_copy->as_runnable_task().has_predecessor()) {
+              // It's not necessary to make this a volatile write because
+              // the next read of the predecessor is on this thread in complete(),
+              // and the predecessor is cleared there (using a volatile write)
+              current_task->as_runnable_task().acquire_predecessor_from(
+                  shared_memory_task_copy->as_runnable_task());
+            }
+
+            // It may not be necessary to make this a volatile write, since the
+            // next read will be done by this thread in complete where the
+            // rescheduling occurs, but since the task could be stolen later
+            // before this is written again, we should do the volatile write
+            // here.  (It might not be necessary though because I don't know
+            // where else the priority would be read after it is scheduled
+            // by this thread; for now, we leave it volatile, but we should
+            // benchmark the cost of this.)
+            current_task.as_volatile()->set_priority(
+                shared_memory_task_copy->get_priority());
+
+            // It's not necessary to make this a volatile write, since the
+            // next read of it (if true) will be by this thread in `complete()`,
+            // which will unset the flag (using volatile) once it has handled
+            // the respawn
+            current_task->as_runnable_task().set_respawn_flag();
+          }
+
+          queue.complete((*std::move(current_task)).as_runnable_task(),
+                         team_scheduler.team_scheduler_info());
+        }
+      }
+    }
+  }
+
+  static void execute(scheduler_type const& scheduler) {
+    const int shared_per_warp = 2048;
+    const dim3 grid(Kokkos::Impl::cuda_internal_multiprocessor_count(), 1, 1);
+    const dim3 block(1, Kokkos::Impl::CudaTraits::WarpSize, warps_per_block);
+    const int shared_total    = shared_per_warp * warps_per_block;
+    const cudaStream_t stream = nullptr;
+
+    KOKKOS_ASSERT(
+        static_cast<long>(grid.x * grid.y * grid.z * block.x * block.y *
+                          block.z) ==
+        static_cast<long>(get_max_team_count(scheduler.get_execution_space()) *
+                          Kokkos::Impl::CudaTraits::WarpSize));
+
+    auto& queue = scheduler.queue();
+
+    Impl::cuda_device_synchronize(
+        "Kokkos::Impl::TaskQueueSpecialization<SimpleTaskScheduler<Kokkos::"
+        "Cuda>::execute: Pre Task Execution");
+
+    // Query the stack size, in bytes:
+
+    size_t previous_stack_size = 0;
+    KOKKOS_IMPL_CUDA_SAFE_CALL(
+        cudaDeviceGetLimit(&previous_stack_size, cudaLimitStackSize));
+
+    // If not large enough then set the stack size, in bytes:
+
+    const size_t larger_stack_size = 1 << 11;
+
+    if (previous_stack_size < larger_stack_size) {
+      KOKKOS_IMPL_CUDA_SAFE_CALL(
+          cudaDeviceSetLimit(cudaLimitStackSize, larger_stack_size));
+    }
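+    // (Presumably needed because task closures are invoked through function
+    // pointers, and such indirect calls keep the compiler from bounding the
+    // device stack statically; 1 << 11 == 2048 bytes per thread is enforced
+    // for the launch and the previous limit is restored afterwards.)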
+
+    cuda_task_queue_execute<<<grid, block, shared_total, stream>>>(
+        scheduler, shared_per_warp);
+
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetLastError());
+
+    Impl::cuda_device_synchronize(
+        "Kokkos::Impl::TaskQueueSpecialization<SimpleTaskScheduler<Kokkos::"
+        "Cuda>::execute: Post Task Execution");
+
+    if (previous_stack_size < larger_stack_size) {
+      KOKKOS_IMPL_CUDA_SAFE_CALL(
+          cudaDeviceSetLimit(cudaLimitStackSize, previous_stack_size));
+    }
+  }
+
+  template <typename TaskType>
+  static
+      // TODO @tasking @optimization DSH specialize this for trivially
+      // destructible types
+      void
+      get_function_pointer(typename TaskType::function_type& ptr,
+                           typename TaskType::destroy_type& dtor) {
+    using function_type = typename TaskType::function_type;
+    using destroy_type  = typename TaskType::destroy_type;
+
+    // TODO @tasking @minor DSH make sure there aren't any alignment concerns?
+    void* storage = cuda_internal_scratch_unified(
+        Kokkos::Cuda(), sizeof(function_type) + sizeof(destroy_type));
+    function_type* ptr_ptr = (function_type*)storage;
+    destroy_type* dtor_ptr =
+        (destroy_type*)((char*)storage + sizeof(function_type));
+
+    Impl::cuda_device_synchronize(
+        "Kokkos::Impl::TaskQueueSpecialization<SimpleTaskScheduler<Kokkos::"
+        "Cuda>::execute: Pre Get Function Pointer for Tasks");
+
+    set_cuda_task_base_apply_function_pointer<TaskType>
+        <<<1, 1>>>(ptr_ptr, dtor_ptr);
+
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetLastError());
+    Impl::cuda_device_synchronize(
+        "Kokkos::Impl::TaskQueueSpecialization<SimpleTaskScheduler<Kokkos::"
+        "Cuda>::execute: Post Get Function Pointer for Tasks");
+
+    ptr  = *ptr_ptr;
+    dtor = *dtor_ptr;
+  }
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class Scheduler>
+class TaskQueueSpecializationConstrained<
+    Scheduler, std::enable_if_t<std::is_same<
+                   typename Scheduler::execution_space, Kokkos::Cuda>::value>> {
+ public:
+  using scheduler_type  = Scheduler;
+  using execution_space = Kokkos::Cuda;
+  using memory_space    = Kokkos::CudaUVMSpace;
+  using member_type     = TaskExec<Kokkos::Cuda, Scheduler>;
+
+  enum : long { max_league_size = 16 };
+
+  KOKKOS_INLINE_FUNCTION
+  static void iff_single_thread_recursive_execute(scheduler_type const&) {}
+
+  __device__ static void driver(scheduler_type scheduler,
+                                int32_t shmem_per_warp) {
+    using queue_type     = typename scheduler_type::queue_type;
+    using task_root_type = TaskBase;
+
+    extern __shared__ int32_t shmem_all[];
+
+    task_root_type* const end = (task_root_type*)task_root_type::EndTag;
+    task_root_type* const no_more_tasks_sentinel = nullptr;
+
+    int32_t* const warp_shmem =
+        shmem_all + (threadIdx.z * shmem_per_warp) / sizeof(int32_t);
+
+    task_root_type* const task_shmem = (task_root_type*)warp_shmem;
+
+    const int warp_lane = threadIdx.x + threadIdx.y * blockDim.x;
+
+    member_type single_exec(scheduler, warp_shmem, 1);
+    member_type team_exec(scheduler, warp_shmem, blockDim.y);
+
+    auto& team_queue = team_exec.scheduler().queue();
+
+    task_root_type* task_ptr = no_more_tasks_sentinel;
+
+    // Loop until all queues are empty and no tasks in flight
+
+    do {
+      // Each team lead attempts to acquire either a thread team task
+      // or collection of single thread tasks for the team.
+
+      if (0 == warp_lane) {
+        if (*((volatile int*)&team_queue.m_ready_count) > 0) {
+          task_ptr = end;
+          // Attempt to acquire a task
+          // Loop by priority and then type
+          for (int i = 0; i < queue_type::NumQueue && end == task_ptr; ++i) {
+            for (int j = 0; j < 2 && end == task_ptr; ++j) {
+              task_ptr = queue_type::pop_ready_task(&team_queue.m_ready[i][j]);
+            }
+          }
+        } else {
+          // Returns nullptr if and only if all other queues also have a
+          // ready count of 0. Otherwise, returns a task from another
+          // queue, or `end` if one could not be popped.
+          task_ptr = team_queue.attempt_to_steal_task();
+        }
+      }
+
+      // Synchronize warp with memory fence before broadcasting task pointer:
+
+      // KOKKOS_IMPL_CUDA_SYNCWARP_OR_RETURN( "A" );
+      __syncwarp(0xffffffff);
+
+      // Broadcast task pointer:
+
+      ((int*)&task_ptr)[0] =
+          __shfl_sync(0xffffffff, ((int*)&task_ptr)[0], 0, 32);
+      ((int*)&task_ptr)[1] =
+          __shfl_sync(0xffffffff, ((int*)&task_ptr)[1], 0, 32);
+
+#if defined(KOKKOS_ENABLE_DEBUG)
+      KOKKOS_IMPL_CUDA_SYNCWARP_OR_RETURN("TaskQueue CUDA task_ptr");
+#endif
+
+      if (0 == task_ptr) break;  // 0 == queue->m_ready_count
+
+      if (end != task_ptr) {
+        // Whole warp copy task's closure to/from shared memory.
+        // Use all threads of warp for coalesced read/write.
+
+        int32_t const b = sizeof(task_root_type) / sizeof(int32_t);
+        int32_t const e =
+            *((int32_t volatile*)(&task_ptr->m_alloc_size)) / sizeof(int32_t);
+
+        int32_t volatile* const task_mem = (int32_t volatile*)task_ptr;
+
+        KOKKOS_ASSERT(e * sizeof(int32_t) < shmem_per_warp);
+
+        // copy task closure from global to shared memory:
+
+        for (int32_t i = warp_lane; i < e; i += CudaTraits::WarpSize) {
+          warp_shmem[i] = task_mem[i];
+        }
+
+        // Synchronize threads of the warp and ensure memory
+        // writes are visible to all threads in the warp.
+
+        // KOKKOS_IMPL_CUDA_SYNCWARP_OR_RETURN( "B" );
+        __syncwarp(0xffffffff);
+
+        if (task_root_type::TaskTeam == task_shmem->m_task_type) {
+          // Thread Team Task
+          (*task_shmem->m_apply)(task_shmem, &team_exec);
+        } else if (0 == threadIdx.y) {
+          // Single Thread Task
+          (*task_shmem->m_apply)(task_shmem, &single_exec);
+        }
+
+        // Synchronize threads of the warp and ensure memory
+        // writes are visible to all threads in the warp.
+
+        // KOKKOS_IMPL_CUDA_SYNCWARP_OR_RETURN( "C" );
+        __syncwarp(0xffffffff);
+
+        // copy task closure from shared to global memory:
+
+        for (int32_t i = b + warp_lane; i < e; i += CudaTraits::WarpSize) {
+          task_mem[i] = warp_shmem[i];
+        }
+
+        // Synchronize threads of the warp and ensure memory
+        // writes are visible to the root thread of the warp for
+        // respawn or completion.
+
+        // KOKKOS_IMPL_CUDA_SYNCWARP_OR_RETURN( "D" );
+        __syncwarp(0xffffffff);
+
+        // If a respawn was requested, copy the respawn data back to main memory
+
+        if (0 == warp_lane) {
+          if (((task_root_type*)task_root_type::LockTag) !=
+              task_shmem->m_next) {
+            ((volatile task_root_type*)task_ptr)->m_next = task_shmem->m_next;
+            ((volatile task_root_type*)task_ptr)->m_priority =
+                task_shmem->m_priority;
+          }
+
+          team_queue.complete(task_ptr);
+        }
+      }
+    } while (1);
+  }
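+
+  // The driver above broadcasts the 64-bit task pointer from lane 0 by
+  // shuffling its two 32-bit halves separately. A minimal stand-alone
+  // sketch of that technique (illustrative only; broadcast_ptr is not
+  // part of this file):
+  //
+  //   __device__ void* broadcast_ptr(void* p) {
+  //     // shuffle each 32-bit half of the pointer from lane 0
+  //     ((int*)&p)[0] = __shfl_sync(0xffffffff, ((int*)&p)[0], 0, 32);
+  //     ((int*)&p)[1] = __shfl_sync(0xffffffff, ((int*)&p)[1], 0, 32);
+  //     return p;
+  //   }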
+
+  static void execute(scheduler_type const& scheduler) {
+    const int shared_per_warp = 2048;
+    const int warps_per_block = 4;
+    const dim3 grid(Kokkos::Impl::cuda_internal_multiprocessor_count(), 1, 1);
+    // const dim3 grid( 1 , 1 , 1 );
+    const dim3 block(1, Kokkos::Impl::CudaTraits::WarpSize, warps_per_block);
+    const int shared_total    = shared_per_warp * warps_per_block;
+    const cudaStream_t stream = 0;
+
+    auto& queue = scheduler.queue();
+    queue.initialize_team_queues(warps_per_block * grid.x);
+
+    Impl::cuda_device_synchronize(
+        "Kokkos::Impl::TaskQueueSpecializationConstrained<SimpleTaskScheduler<"
+        "Kokkos::Cuda>::execute: Pre Execute Task");
+
+    // Query the stack size, in bytes:
+
+    size_t previous_stack_size = 0;
+    KOKKOS_IMPL_CUDA_SAFE_CALL(
+        cudaDeviceGetLimit(&previous_stack_size, cudaLimitStackSize));
+
+    // If not large enough then set the stack size, in bytes:
+
+    const size_t larger_stack_size = 2048;
+
+    if (previous_stack_size < larger_stack_size) {
+      KOKKOS_IMPL_CUDA_SAFE_CALL(
+          cudaDeviceSetLimit(cudaLimitStackSize, larger_stack_size));
+    }
+
+    cuda_task_queue_execute<<<grid, block, shared_total, stream>>>(
+        scheduler, shared_per_warp);
+
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetLastError());
+
+    Impl::cuda_device_synchronize(
+        "Kokkos::Impl::TaskQueueSpecializationConstrained<SimpleTaskScheduler<"
+        "Kokkos::Cuda>::execute: Post Execute Task");
+
+    if (previous_stack_size < larger_stack_size) {
+      KOKKOS_IMPL_CUDA_SAFE_CALL(
+          cudaDeviceSetLimit(cudaLimitStackSize, previous_stack_size));
+    }
+  }
+
+  template <typename TaskType>
+  static void get_function_pointer(typename TaskType::function_type& ptr,
+                                   typename TaskType::destroy_type& dtor) {
+    using function_type = typename TaskType::function_type;
+    using destroy_type  = typename TaskType::destroy_type;
+
+    void* storage = cuda_internal_scratch_unified(
+        Kokkos::Cuda(), sizeof(function_type) + sizeof(destroy_type));
+    function_type* ptr_ptr = (function_type*)storage;
+    destroy_type* dtor_ptr =
+        (destroy_type*)((char*)storage + sizeof(function_type));
+
+    Impl::cuda_device_synchronize(
+        "Kokkos::Impl::TaskQueueSpecializationConstrained<SimpleTaskScheduler<"
+        "Kokkos::Cuda>::get_function_pointer: Pre Get Function Pointer");
+
+    set_cuda_task_base_apply_function_pointer<TaskType>
+        <<<1, 1>>>(ptr_ptr, dtor_ptr);
+
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaGetLastError());
+    Impl::cuda_device_synchronize(
+        "Kokkos::Impl::TaskQueueSpecializationConstrained<SimpleTaskScheduler<"
+        "Kokkos::Cuda>::get_function_pointer: Post Get Function Pointer");
+
+    ptr  = *ptr_ptr;
+    dtor = *dtor_ptr;
+  }
+};
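+
+// get_function_pointer above relies on a general CUDA pattern: a device
+// function address is only meaningful on the device, so a <<<1, 1>>>
+// kernel stores it into host-accessible (unified) memory where the host
+// reads it back after synchronizing. A minimal sketch of the pattern
+// (the names here are illustrative, not part of this file):
+//
+//   using fn_t = void (*)(int);
+//   __device__ void my_device_fn(int) {}
+//   __global__ void grab_fn(fn_t* slot) { *slot = my_device_fn; }
+//
+//   fn_t* slot = nullptr;
+//   cudaMallocManaged(&slot, sizeof(fn_t));
+//   grab_fn<<<1, 1>>>(slot);
+//   cudaDeviceSynchronize();
+//   fn_t host_copy = *slot;  // usable in later kernel launches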
+
+extern template class TaskQueue<
+    Kokkos::Cuda,
+    default_tasking_memory_space_for_execution_space_t<Kokkos::Cuda>>;
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+/**\brief  Impl::TaskExec<Cuda> is the TaskScheduler<Cuda>::member_type
+ *         passed to tasks running in a Cuda space.
+ *
+ *  Cuda thread blocks for tasking are dimensioned:
+ *    blockDim.x == vector length
+ *    blockDim.y == team size
+ *    blockDim.z == number of teams
+ *  where
+ *    blockDim.x * blockDim.y == WarpSize
+ *
+ *  The current implementation requires blockDim.x == 1.
+ *  Vector-level parallelism with blockDim.x > 1 on Volta will
+ *  require a vector-level synchronization mask for vector-level
+ *  collective operations.
+ *
+ *  Both single thread and thread team tasks are run by a full Cuda warp.
+ *  A single thread task is called by warp lane #0 and the remaining
+ *  lanes of the warp are idle.
+ *
+ *  When executing a single thread task the syncwarp or other
+ *  warp synchronizing functions must not be called.
+ */
+template <class Scheduler>
+class TaskExec<Kokkos::Cuda, Scheduler> {
+ private:
+  enum : int { WarpSize = Kokkos::Impl::CudaTraits::WarpSize };
+
+  TaskExec(TaskExec&&)      = delete;
+  TaskExec(TaskExec const&) = delete;
+  TaskExec& operator=(TaskExec&&) = delete;
+  TaskExec& operator=(TaskExec const&) = delete;
+
+  friend class Kokkos::Impl::TaskQueue<
+      Kokkos::Cuda,
+      default_tasking_memory_space_for_execution_space_t<Kokkos::Cuda>>;
+  template <class, class>
+  friend class Kokkos::Impl::TaskQueueSpecializationConstrained;
+  template <class>
+  friend class Kokkos::Impl::TaskQueueSpecialization;
+
+  int32_t* m_team_shmem;
+  const int m_team_size;
+  Scheduler m_scheduler;
+
+  // If constructed with arg_team_size == 1 the object
+  // can only be used by threads with threadIdx.y == 0.
+  KOKKOS_INLINE_FUNCTION
+  TaskExec(Scheduler const& parent_scheduler, int32_t* arg_team_shmem,
+           int arg_team_size = blockDim.y)
+      : m_team_shmem(arg_team_shmem),
+        m_team_size(arg_team_size),
+        m_scheduler(parent_scheduler.get_team_scheduler(league_rank())) {}
+
+ public:
+  using thread_team_member = TaskExec;
+
+#if defined(__CUDA_ARCH__)
+  __device__ int team_rank() const { return threadIdx.y; }
+  __device__ int team_size() const { return m_team_size; }
+  //__device__ int league_rank() const { return threadIdx.z; }
+  __device__ int league_rank() const {
+    return blockIdx.x * blockDim.z + threadIdx.z;
+  }
+  __device__ int league_size() const { return blockDim.z * gridDim.x; }
+
+  __device__ void team_barrier() const {
+    if (1 < m_team_size) {
+      __syncwarp(0xffffffff);
+    }
+  }
+
+  template <class ValueType>
+  __device__ void team_broadcast(ValueType& val, const int thread_id) const {
+    if (1 < m_team_size) {
+      // WarpSize = blockDim.x * blockDim.y
+      // thread_id < blockDim.y
+      ValueType tmp(val);  // input might not be register variable
+      Impl::in_place_shfl(val, tmp, blockDim.x * thread_id, WarpSize);
+    }
+  }
+
+#else
+  __host__ int team_rank() const { return 0; }
+  __host__ int team_size() const { return 0; }
+  __host__ int league_rank() const { return 0; }
+  __host__ int league_size() const { return 0; }
+  __host__ void team_barrier() const {}
+  template <class ValueType>
+  __host__ void team_broadcast(ValueType&, const int) const {}
+#endif
+
+  KOKKOS_INLINE_FUNCTION Scheduler const& scheduler() const noexcept {
+    return m_scheduler;
+  }
+  KOKKOS_INLINE_FUNCTION Scheduler& scheduler() noexcept { return m_scheduler; }
+};
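+
+// A worked example of the dimensioning documented above, assuming the
+// launch configuration used by execute(): block = dim3(1, WarpSize, 4)
+// (vector length 1, team size 32, 4 warp-teams per block) and, for
+// illustration, gridDim.x == 8:
+//
+//   league_size() = blockDim.z * gridDim.x = 4 * 8 = 32
+//   // warp/team 2 of block 3:
+//   league_rank() = blockIdx.x * blockDim.z + threadIdx.z = 3 * 4 + 2 = 14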
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename iType, typename Scheduler>
+struct TeamThreadRangeBoundariesStruct<iType,
+                                       TaskExec<Kokkos::Cuda, Scheduler>> {
+  using index_type  = iType;
+  using member_type = TaskExec<Kokkos::Cuda, Scheduler>;
+
+  const iType start;
+  const iType end;
+  const iType increment;
+  member_type const& thread;
+
+#if defined(__CUDA_ARCH__)
+
+  __device__ inline TeamThreadRangeBoundariesStruct(
+      member_type const& arg_thread, const iType& arg_count)
+      : start(threadIdx.y),
+        end(arg_count),
+        increment(blockDim.y),
+        thread(arg_thread) {}
+
+  __device__ inline TeamThreadRangeBoundariesStruct(
+      member_type const& arg_thread, const iType& arg_start,
+      const iType& arg_end)
+      : start(arg_start + threadIdx.y),
+        end(arg_end),
+        increment(blockDim.y),
+        thread(arg_thread) {}
+
+#else
+
+  TeamThreadRangeBoundariesStruct(member_type const& arg_thread,
+                                  const iType& arg_count);
+
+  TeamThreadRangeBoundariesStruct(member_type const& arg_thread,
+                                  const iType& arg_start, const iType& arg_end);
+
+#endif
+};
+
+//----------------------------------------------------------------------------
+
+template <typename iType, typename Scheduler>
+struct ThreadVectorRangeBoundariesStruct<iType,
+                                         TaskExec<Kokkos::Cuda, Scheduler>> {
+  using index_type  = iType;
+  using member_type = TaskExec<Kokkos::Cuda, Scheduler>;
+
+  const index_type start;
+  const index_type end;
+  const index_type increment;
+  const member_type& thread;
+
+#if defined(__CUDA_ARCH__)
+
+  __device__ inline ThreadVectorRangeBoundariesStruct(
+      member_type const& arg_thread, const index_type& arg_count)
+      : start(threadIdx.x),
+        end(arg_count),
+        increment(blockDim.x),
+        thread(arg_thread) {}
+
+  __device__ inline ThreadVectorRangeBoundariesStruct(
+      member_type const& arg_thread, const index_type& arg_begin,
+      const index_type& arg_end)
+      : start(arg_begin + threadIdx.x),
+        end(arg_end),
+        increment(blockDim.x),
+        thread(arg_thread) {}
+
+#else
+
+  ThreadVectorRangeBoundariesStruct(member_type const& arg_thread,
+                                    const index_type& arg_count);
+
+  ThreadVectorRangeBoundariesStruct(member_type const& arg_thread,
+                                    const index_type& arg_begin,
+                                    const index_type& arg_end);
+
+#endif
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+// template<typename iType>
+// KOKKOS_INLINE_FUNCTION
+// Impl::TeamThreadRangeBoundariesStruct< iType, Impl::TaskExec< Kokkos::Cuda >
+// > TeamThreadRange( const Impl::TaskExec< Kokkos::Cuda > & thread, const iType
+// & count )
+//{
+//  return Impl::TeamThreadRangeBoundariesStruct< iType, Impl::TaskExec<
+//  Kokkos::Cuda > >( thread, count );
+//}
+//
+// template<typename iType1, typename iType2>
+// KOKKOS_INLINE_FUNCTION
+// Impl::TeamThreadRangeBoundariesStruct
+//  < std::common_type_t<iType1,iType2>
+//  , Impl::TaskExec< Kokkos::Cuda > >
+// TeamThreadRange( const Impl::TaskExec< Kokkos::Cuda > & thread
+//               , const iType1 & begin, const iType2 & end )
+//{
+//  using iType = std::common_type_t< iType1, iType2 >;
+//  return Impl::TeamThreadRangeBoundariesStruct< iType, Impl::TaskExec<
+//  Kokkos::Cuda > >(
+//           thread, iType(begin), iType(end) );
+//}
+//
+// template<typename iType>
+// KOKKOS_INLINE_FUNCTION
+// Impl::ThreadVectorRangeBoundariesStruct<iType,Impl::TaskExec< Kokkos::Cuda >
+// > ThreadVectorRange( const Impl::TaskExec< Kokkos::Cuda > & thread
+//                 , const iType & count )
+//{
+//  return Impl::ThreadVectorRangeBoundariesStruct<iType,Impl::TaskExec<
+//  Kokkos::Cuda > >(thread,count);
+//}
+//
+// template<typename iType>
+// KOKKOS_INLINE_FUNCTION
+// Impl::ThreadVectorRangeBoundariesStruct<iType,Impl::TaskExec< Kokkos::Cuda >
+// > ThreadVectorRange( const Impl::TaskExec< Kokkos::Cuda > & thread
+//                 , const iType & arg_begin
+//                 , const iType & arg_end )
+//{
+//  return Impl::ThreadVectorRangeBoundariesStruct<iType,Impl::TaskExec<
+//  Kokkos::Cuda > >(thread,arg_begin,arg_end);
+//}
+
+// KOKKOS_INLINE_FUNCTION
+// Impl::ThreadSingleStruct<Impl::TaskExec< Kokkos::Cuda > >
+// PerTeam(const Impl::TaskExec< Kokkos::Cuda >& thread)
+// {
+//   return Impl::ThreadSingleStruct<Impl::TaskExec< Kokkos::Cuda > >(thread);
+// }
+
+// KOKKOS_INLINE_FUNCTION
+// Impl::VectorSingleStruct<Impl::TaskExec< Kokkos::Cuda > >
+// PerThread(const Impl::TaskExec< Kokkos::Cuda >& thread)
+// {
+//   return Impl::VectorSingleStruct<Impl::TaskExec< Kokkos::Cuda > >(thread);
+// }
+
+/** \brief  Inter-thread parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team.
+ */
+template <typename iType, class Lambda, class Scheduler>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Cuda, Scheduler>>& loop_boundaries,
+    const Lambda& lambda) {
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i);
+  }
+}
+
+template <typename iType, class Lambda, class Scheduler>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Cuda, Scheduler>>& loop_boundaries,
+    const Lambda& lambda) {
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i);
+  }
+}
+
+// reduce across corresponding lanes between team members within warp
+// assume stride*team_size == warp_size
+template <typename ValueType, class JoinType>
+KOKKOS_INLINE_FUNCTION void strided_shfl_warp_reduction(const JoinType& join,
+                                                        ValueType& val,
+                                                        int team_size,
+                                                        int stride) {
+  for (int lane_delta = (team_size * stride) >> 1; lane_delta >= stride;
+       lane_delta >>= 1) {
+    join(val, Kokkos::shfl_down(val, lane_delta, team_size * stride));
+  }
+}
+
+// multiple within-warp non-strided reductions
+template <typename ValueType, class JoinType>
+KOKKOS_INLINE_FUNCTION void multi_shfl_warp_reduction(const JoinType& join,
+                                                      ValueType& val,
+                                                      int vec_length) {
+  for (int lane_delta = vec_length >> 1; lane_delta; lane_delta >>= 1) {
+    join(val, Kokkos::shfl_down(val, lane_delta, vec_length));
+  }
+}
+
+// broadcast within warp
+template <class ValueType>
+KOKKOS_INLINE_FUNCTION ValueType shfl_warp_broadcast(ValueType& val,
+                                                     int src_lane, int width) {
+  if (1 < width) {
+    return Kokkos::shfl(val, src_lane, width);
+  } else {
+    return val;
+  }
+}
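+
+// A worked trace of strided_shfl_warp_reduction, assuming team_size == 4
+// and stride == 8 (blockDim.x == 8, blockDim.y == 4, so
+// team_size * stride == WarpSize):
+//
+//   lane_delta = (4 * 8) >> 1 = 16 : join(val, shfl_down(val, 16, 32))
+//   lane_delta = 8                 : join(val, shfl_down(val,  8, 32))
+//   // loop stops: lane_delta == 4 < stride == 8
+//
+// Afterwards lanes 0..7 each hold the team-wide result for their vector
+// position. multi_shfl_warp_reduction is the non-strided analogue within
+// one vector: for vec_length == 8 it uses lane_delta = 4, 2, 1.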
+
+/*// all-reduce across corresponding vector lanes between team members within
+warp
+// assume vec_length*team_size == warp_size
+// blockDim.x == vec_length == stride
+// blockDim.y == team_size
+// threadIdx.x == position in vec
+// threadIdx.y == member number
+template< typename iType, class Lambda, typename ValueType, class JoinType >
+KOKKOS_INLINE_FUNCTION
+void parallel_reduce
+  (const Impl::TeamThreadRangeBoundariesStruct<iType,Impl::TaskExec<
+Kokkos::Cuda > >& loop_boundaries, const Lambda & lambda, const JoinType& join,
+   ValueType& initialized_result) {
+
+  ValueType result = initialized_result;
+  for( iType i = loop_boundaries.start; i < loop_boundaries.end;
+i+=loop_boundaries.increment) { lambda(i,result);
+  }
+  initialized_result = result;
+
+  strided_shfl_warp_reduction<ValueType, JoinType>(
+                          join,
+                          initialized_result,
+                          loop_boundaries.thread.team_size(),
+                          blockDim.x);
+  initialized_result = shfl_warp_broadcast<ValueType>( initialized_result,
+threadIdx.x, Impl::CudaTraits::WarpSize );
+}*/
+
+// all-reduce across corresponding vector lanes between team members within warp
+// if no join() provided, use sum
+// assume vec_length*team_size == warp_size
+// blockDim.x == vec_length == stride
+// blockDim.y == team_size
+// threadIdx.x == position in vec
+// threadIdx.y == member number
+template <typename iType, class Lambda, typename ValueType, class Scheduler>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Cuda, Scheduler>>& loop_boundaries,
+    const Lambda& lambda, ValueType& initialized_result) {
+  // TODO @internal_documentation what is the point of creating this temporary?
+  ValueType result = initialized_result;
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, result);
+  }
+  initialized_result = result;
+
+  if (1 < loop_boundaries.thread.team_size()) {
+    strided_shfl_warp_reduction(
+        [&](ValueType& val1, const ValueType& val2) { val1 += val2; },
+        initialized_result, loop_boundaries.thread.team_size(), blockDim.x);
+
+    initialized_result = shfl_warp_broadcast<ValueType>(
+        initialized_result, threadIdx.x, Impl::CudaTraits::WarpSize);
+  }
+}
+
+template <typename iType, class Lambda, typename ReducerType, class Scheduler>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Cuda, Scheduler>>& loop_boundaries,
+    const Lambda& lambda, const ReducerType& reducer) {
+  using ValueType = typename ReducerType::value_type;
+  // TODO @internal_documentation what is the point of creating this temporary?
+  ValueType result = ValueType();
+  reducer.init(result);
+
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, result);
+  }
+
+  if (1 < loop_boundaries.thread.team_size()) {
+    strided_shfl_warp_reduction(
+        [&](ValueType& val1, const ValueType& val2) {
+          reducer.join(val1, val2);
+        },
+        result, loop_boundaries.thread.team_size(), blockDim.x);
+
+    reducer.reference() = shfl_warp_broadcast<ValueType>(
+        result, threadIdx.x, Impl::CudaTraits::WarpSize);
+  } else {
+    reducer.reference() = result;
+  }
+}
+// all-reduce within team members within warp
+// assume vec_length*team_size == warp_size
+// blockDim.x == vec_length == stride
+// blockDim.y == team_size
+// threadIdx.x == position in vec
+// threadIdx.y == member number
+/*template< typename iType, class Lambda, typename ValueType, class JoinType >
+KOKKOS_INLINE_FUNCTION
+void parallel_reduce
+  (const Impl::ThreadVectorRangeBoundariesStruct<iType,Impl::TaskExec<
+Kokkos::Cuda > >& loop_boundaries, const Lambda & lambda, const JoinType& join,
+   ValueType& initialized_result) {
+
+  ValueType result = initialized_result;
+  for( iType i = loop_boundaries.start; i < loop_boundaries.end;
+i+=loop_boundaries.increment) { lambda(i,result);
+  }
+  initialized_result = result;
+
+  multi_shfl_warp_reduction<ValueType, JoinType>(join, initialized_result,
+blockDim.x); initialized_result = shfl_warp_broadcast<ValueType>(
+initialized_result, 0, blockDim.x );
+}*/
+
+// all-reduce within team members within warp
+// if no join() provided, use sum
+// assume vec_length*team_size == warp_size
+// blockDim.x == vec_length == stride
+// blockDim.y == team_size
+// threadIdx.x == position in vec
+// threadIdx.y == member number
+template <typename iType, class Lambda, typename ValueType, class Scheduler>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Cuda, Scheduler>>& loop_boundaries,
+    const Lambda& lambda, ValueType& initialized_result) {
+  ValueType result = initialized_result;
+
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, result);
+  }
+
+  initialized_result = result;
+
+  if (1 < loop_boundaries.thread.team_size()) {
+    // initialized_result = multi_shfl_warp_reduction(
+    multi_shfl_warp_reduction(
+        [&](ValueType& val1, const ValueType& val2) { val1 += val2; },
+        initialized_result, blockDim.x);
+
+    initialized_result =
+        shfl_warp_broadcast<ValueType>(initialized_result, 0, blockDim.x);
+  }
+}
+
+template <typename iType, class Lambda, typename ReducerType, class Scheduler>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Cuda, Scheduler>>& loop_boundaries,
+    const Lambda& lambda, const ReducerType& reducer) {
+  using ValueType = typename ReducerType::value_type;
+
+  ValueType result = ValueType();
+  reducer.init(result);
+
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, result);
+  }
+
+  if (1 < loop_boundaries.thread.team_size()) {
+    multi_shfl_warp_reduction(
+        [&](ValueType& val1, const ValueType& val2) {
+          reducer.join(val1, val2);
+        },
+        result, blockDim.x);
+
+    reducer.reference() = shfl_warp_broadcast<ValueType>(result, 0, blockDim.x);
+  } else {
+    reducer.reference() = result;
+  }
+}
+// scan across corresponding vector lanes between team members within warp
+// assume vec_length*team_size == warp_size
+// blockDim.x == vec_length == stride
+// blockDim.y == team_size
+// threadIdx.x == position in vec
+// threadIdx.y == member number
+template <typename iType, class Closure, class Scheduler>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Cuda, Scheduler>>& loop_boundaries,
+    const Closure& closure) {
+  // Extract value_type from closure
+
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+
+  if (1 < loop_boundaries.thread.team_size()) {
+    // make sure all threads perform all loop iterations
+    const iType bound = loop_boundaries.end + loop_boundaries.start;
+    const int lane    = threadIdx.y * blockDim.x;
+
+    value_type accum = 0;
+    value_type val, y, local_total;
+
+    for (iType i = loop_boundaries.start; i < bound;
+         i += loop_boundaries.increment) {
+      val = 0;
+      if (i < loop_boundaries.end) closure(i, val, false);
+
+      // intra-blockDim.y exclusive scan on 'val'
+      // accum = running total carried from previous iterations;
+      // local_total will hold this iteration's team-wide sum
+
+      // INCLUSIVE scan
+      for (int offset = blockDim.x; offset < Impl::CudaTraits::WarpSize;
+           offset <<= 1) {
+        y = Kokkos::shfl_up(val, offset, Impl::CudaTraits::WarpSize);
+        if (lane >= offset) {
+          val += y;
+        }
+      }
+
+      // broadcast this iteration's inclusive total to all threads
+      local_total = shfl_warp_broadcast<value_type>(
+          val, threadIdx.x + Impl::CudaTraits::WarpSize - blockDim.x,
+          Impl::CudaTraits::WarpSize);
+
+      // make EXCLUSIVE scan by shifting values over one
+      val = Kokkos::shfl_up(val, blockDim.x, Impl::CudaTraits::WarpSize);
+      if (threadIdx.y == 0) {
+        val = 0;
+      }
+
+      val += accum;
+      if (i < loop_boundaries.end) closure(i, val, true);
+      accum += local_total;
+    }
+  } else {
+    value_type accum = 0;
+    for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+         i += loop_boundaries.increment) {
+      closure(i, accum, true);
+    }
+  }
+}
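+
+// A worked trace of the scan above, assuming blockDim.x == 8 and
+// team_size == 4 (lane stride 8), with per-member contributions
+// {3, 1, 4, 1} at one vector position:
+//
+//   inclusive strided scan : {3, 1, 4, 1} -> {3, 4, 8, 9}
+//   local_total            : broadcast of the last member's value = 9
+//   shift up by blockDim.x,
+//   zero member 0          : {0, 3, 4, 8}   // exclusive scan
+//
+// accum is then added to every member's value, and local_total is added
+// to accum to carry the running total into the next iteration.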
+
+// scan within team member (vector) within warp
+// assume vec_length*team_size == warp_size
+// blockDim.x == vec_length == stride
+// blockDim.y == team_size
+// threadIdx.x == position in vec
+// threadIdx.y == member number
+template <typename iType, class Closure, class Scheduler>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Cuda, Scheduler>>& loop_boundaries,
+    const Closure& closure) {
+  // Extract value_type from closure
+
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+
+  if (1 < loop_boundaries.thread.team_size()) {
+    // make sure all threads perform all loop iterations
+    const iType bound = loop_boundaries.end + loop_boundaries.start;
+
+    value_type accum = 0;
+    value_type val, y, local_total;
+
+    for (iType i = loop_boundaries.start; i < bound;
+         i += loop_boundaries.increment) {
+      val = 0;
+      if (i < loop_boundaries.end) closure(i, val, false);
+
+      // intra-blockDim.x exclusive scan on 'val'
+      // accum = running total carried from previous iterations;
+      // local_total will hold this iteration's vector-wide sum
+
+      // INCLUSIVE scan
+      for (int offset = 1; offset < blockDim.x; offset <<= 1) {
+        y = Kokkos::shfl_up(val, offset, blockDim.x);
+        if (threadIdx.x >= offset) {
+          val += y;
+        }
+      }
+
+      // broadcast this iteration's inclusive total to all vector lanes
+      local_total =
+          shfl_warp_broadcast<value_type>(val, blockDim.x - 1, blockDim.x);
+
+      // make EXCLUSIVE scan by shifting values over one
+      val = Kokkos::shfl_up(val, 1, blockDim.x);
+      if (threadIdx.x == 0) {
+        val = 0;
+      }
+
+      val += accum;
+      if (i < loop_boundaries.end) closure(i, val, true);
+      accum += local_total;
+    }
+  } else {
+    value_type accum = 0;
+    for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+         i += loop_boundaries.increment) {
+      closure(i, accum, true);
+    }
+  }
+}
+
+} /* namespace Kokkos */
+
+namespace Kokkos {
+
+template <class FunctorType, class Scheduler>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<Impl::TaskExec<Kokkos::Cuda, Scheduler>>&,
+    const FunctorType& lambda) {
+#ifdef __CUDA_ARCH__
+  if (threadIdx.x == 0) lambda();
+#endif
+}
+
+template <class FunctorType, class Scheduler>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::TaskExec<Kokkos::Cuda, Scheduler>>&,
+    const FunctorType& lambda) {
+#ifdef __CUDA_ARCH__
+  if (threadIdx.x == 0 && threadIdx.y == 0) lambda();
+#endif
+}
+
+template <class FunctorType, class ValueType, class Scheduler>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<Impl::TaskExec<Kokkos::Cuda, Scheduler>>& s,
+    const FunctorType& lambda, ValueType& val) {
+#ifdef __CUDA_ARCH__
+  if (threadIdx.x == 0) lambda(val);
+  if (1 < s.team_member.team_size()) {
+    val = shfl(val, 0, blockDim.x);
+  }
+#endif
+}
+
+template <class FunctorType, class ValueType, class Scheduler>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::TaskExec<Kokkos::Cuda, Scheduler>>&
+        single_struct,
+    const FunctorType& lambda, ValueType& val) {
+#ifdef __CUDA_ARCH__
+  if (threadIdx.x == 0 && threadIdx.y == 0) {
+    lambda(val);
+  }
+  single_struct.team_member.team_broadcast(val, 0);
+#endif
+}
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#undef KOKKOS_IMPL_CUDA_SYNCWARP_OR_RETURN
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_CUDA_TASK_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Team.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Team.hpp
new file mode 100644 (file)
index 0000000..ffafc47
--- /dev/null
@@ -0,0 +1,935 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_TEAM_HPP
+#define KOKKOS_CUDA_TEAM_HPP
+
+#include <algorithm>
+
+#include <Kokkos_Macros.hpp>
+
+/* only compile this file if CUDA is enabled for Kokkos */
+#if defined(KOKKOS_ENABLE_CUDA)
+
+#include <utility>
+#include <Kokkos_Parallel.hpp>
+
+#include <Cuda/Kokkos_Cuda_KernelLaunch.hpp>
+#include <Cuda/Kokkos_Cuda_ReduceScan.hpp>
+#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
+#include <Kokkos_Vectorization.hpp>
+
+#include <impl/Kokkos_Tools.hpp>
+#include <typeinfo>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename Type>
+struct CudaJoinFunctor {
+  using value_type = Type;
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& update, const value_type& input) {
+    update += input;
+  }
+};
+
+/**\brief  Team member_type passed to TeamPolicy or TeamTask closures.
+ *
+ *  Cuda thread blocks for team closures are dimensioned as:
+ *    blockDim.x == number of "vector lanes" per "thread"
+ *    blockDim.y == number of "threads" per team
+ *    blockDim.z == number of teams in a block
+ *  where
+ *    A set of teams exactly fill a warp OR a team is the whole block
+ *      ( 0 == WarpSize % ( blockDim.x * blockDim.y ) )
+ *      OR
+ *      ( 1 == blockDim.z )
+ *
+ *  Thus when 1 < blockDim.z the team is warp-synchronous
+ *  and __syncthreads should not be called in team collectives.
+ *
+ *  When multiple teams are mapped onto a single block then the
+ *  total available shared memory must be partitioned among teams.
+ */
+class CudaTeamMember {
+ public:
+  using execution_space      = Kokkos::Cuda;
+  using scratch_memory_space = execution_space::scratch_memory_space;
+
+ private:
+  mutable void* m_team_reduce;
+  scratch_memory_space m_team_shared;
+  int m_team_reduce_size;
+  int m_league_rank;
+  int m_league_size;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& team_shmem() const {
+    return m_team_shared.set_team_thread_mode(0, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& team_scratch(
+      const int& level) const {
+    return m_team_shared.set_team_thread_mode(level, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& thread_scratch(
+      const int& level) const {
+    return m_team_shared.set_team_thread_mode(level, team_size(), team_rank());
+  }
+
+  KOKKOS_INLINE_FUNCTION int league_rank() const { return m_league_rank; }
+  KOKKOS_INLINE_FUNCTION int league_size() const { return m_league_size; }
+  KOKKOS_INLINE_FUNCTION int team_rank() const {
+    KOKKOS_IF_ON_DEVICE((return threadIdx.y;))
+    KOKKOS_IF_ON_HOST((return 0;))
+  }
+
+  KOKKOS_INLINE_FUNCTION int team_size() const {
+    KOKKOS_IF_ON_DEVICE((return blockDim.y;))
+    KOKKOS_IF_ON_HOST((return 1;))
+  }
+
+  KOKKOS_INLINE_FUNCTION void team_barrier() const {
+    KOKKOS_IF_ON_DEVICE((
+        if (1 == blockDim.z) { __syncthreads(); }  // team == block
+        else { __threadfence_block(); }            // team <= warp
+        ))
+  }
+
+  //--------------------------------------------------------------------------
+
+  template <class ValueType>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(ValueType& val,
+                                             const int& thread_id) const {
+    (void)val;
+    (void)thread_id;
+    KOKKOS_IF_ON_DEVICE((
+        if (1 == blockDim.z) {  // team == block
+          __syncthreads();
+          // Wait until all threads arrive before writing shared data
+          if (threadIdx.x == 0u && threadIdx.y == (uint32_t)thread_id) {
+            *((ValueType*)m_team_reduce) = val;
+          }
+          __syncthreads();  // Wait until the root thread has written
+                            // before reading shared data
+          val = *((ValueType*)m_team_reduce);
+        } else {               // team <= warp
+          ValueType tmp(val);  // input might not be a register variable
+          Impl::in_place_shfl(val, tmp, blockDim.x * thread_id,
+                              blockDim.x * blockDim.y);
+        }))
+  }
+
+  template <class Closure, class ValueType>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(Closure const& f, ValueType& val,
+                                             const int& thread_id) const {
+    (void)f;
+    (void)val;
+    (void)thread_id;
+    KOKKOS_IF_ON_DEVICE((
+        f(val);
+
+        if (1 == blockDim.z) {  // team == block
+          __syncthreads();
+          // Wait until all threads arrive before writing shared data
+          if (threadIdx.x == 0u && threadIdx.y == (uint32_t)thread_id) {
+            *((ValueType*)m_team_reduce) = val;
+          }
+          __syncthreads();  // Wait until the root thread has written
+                            // before reading shared data
+          val = *((ValueType*)m_team_reduce);
+        } else {               // team <= warp
+          ValueType tmp(val);  // input might not be a register variable
+          Impl::in_place_shfl(val, tmp, blockDim.x * thread_id,
+                              blockDim.x * blockDim.y);
+        }))
+  }
+
+  //--------------------------------------------------------------------------
+  /**\brief  Reduction across a team
+   *
+   *  Mapping of teams onto blocks:
+   *    blockDim.x  is "vector lanes"
+   *    blockDim.y  is team "threads"
+   *    blockDim.z  is number of teams per block
+   *
+   *  Requires:
+   *    blockDim.x is power two
+   *    blockDim.x <= CudaTraits::WarpSize
+   *    ( 0 == CudaTraits::WarpSize % ( blockDim.x * blockDim.y ) )
+   *      OR
+   *    ( 1 == blockDim.z )
+   */
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+  team_reduce(ReducerType const& reducer) const noexcept {
+    team_reduce(reducer, reducer.reference());
+  }
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+  team_reduce(ReducerType const& reducer,
+              typename ReducerType::value_type& value) const noexcept {
+    (void)reducer;
+    (void)value;
+    KOKKOS_IF_ON_DEVICE(
+        (typename Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                                        TeamPolicy<Cuda>, ReducerType>::Reducer
+             wrapped_reducer(&reducer);
+         cuda_intra_block_reduction(value, wrapped_reducer, blockDim.y);
+         reducer.reference() = value;))
+  }
+
+  //--------------------------------------------------------------------------
+  /** \brief  Intra-team exclusive prefix sum with team_rank() ordering
+   *          with intra-team non-deterministic ordering accumulation.
+   *
+   *  The global inter-team accumulation value will, at the end of the
+   *  league's parallel execution, be the scan's total.
+   *  Parallel execution ordering of the league's teams is non-deterministic.
+   *  As such the base value for each team's scan operation is similarly
+   *  non-deterministic.
+   */
+  template <typename Type>
+  KOKKOS_INLINE_FUNCTION Type team_scan(const Type& value,
+                                        Type* const global_accum) const {
+    KOKKOS_IF_ON_DEVICE((
+        Type* const base_data = (Type*)m_team_reduce;
+
+        __syncthreads();  // Don't write into shared data until all threads
+                          // have entered this function
+
+        if (0 == threadIdx.y) { base_data[0] = 0; }
+
+        base_data[threadIdx.y + 1] = value;
+        Impl::CudaJoinFunctor<Type> cuda_join_functor;
+        typename Impl::FunctorAnalysis<
+            Impl::FunctorPatternInterface::SCAN, TeamPolicy<Cuda>,
+            Impl::CudaJoinFunctor<Type>>::Reducer reducer(&cuda_join_functor);
+        Impl::cuda_intra_block_reduce_scan<true>(reducer, base_data + 1);
+
+        if (global_accum) {
+          if (blockDim.y == threadIdx.y + 1) {
+            base_data[blockDim.y] =
+                atomic_fetch_add(global_accum, base_data[blockDim.y]);
+          }
+          __syncthreads();  // Wait for atomic
+          base_data[threadIdx.y] += base_data[blockDim.y];
+        }
+
+        return base_data[threadIdx.y];))
+
+    KOKKOS_IF_ON_HOST(((void)value; (void)global_accum; return Type();))
+  }
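+
+  // Example of the team_scan semantics, assuming a team of three threads
+  // (threadIdx.y == 0, 1, 2) contributing values {3, 1, 4}:
+  //
+  //   team_scan(value) returns {0, 3, 4}  // exclusive prefix sum
+  //   // the highest rank can recover the total: 4 + 4 == 8
+  //
+  // With a non-null global_accum the team total (8) is atomically added
+  // to *global_accum, and the previous *global_accum value is added to
+  // every thread's returned prefix, yielding a scan across the league.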
+
+  /** \brief  Intra-team exclusive prefix sum with team_rank() ordering.
+   *
+   *  The highest rank thread can compute the reduction total as
+   *    reduction_total = dev.team_scan( value ) + value ;
+   */
+  template <typename Type>
+  KOKKOS_INLINE_FUNCTION Type team_scan(const Type& value) const {
+    return this->template team_scan<Type>(value, nullptr);
+  }
+
+  //----------------------------------------
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION static std::enable_if_t<is_reducer<ReducerType>::value>
+  vector_reduce(ReducerType const& reducer) {
+    vector_reduce(reducer, reducer.reference());
+  }
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION static std::enable_if_t<is_reducer<ReducerType>::value>
+  vector_reduce(ReducerType const& reducer,
+                typename ReducerType::value_type& value) {
+    (void)reducer;
+    (void)value;
+    KOKKOS_IF_ON_DEVICE(
+        (if (blockDim.x == 1) return;
+
+         // Intra vector lane shuffle reduction:
+         typename ReducerType::value_type tmp(value);
+         typename ReducerType::value_type tmp2 = tmp;
+
+         unsigned mask =
+             blockDim.x == 32
+                 ? 0xffffffff
+                 : ((1 << blockDim.x) - 1)
+                       << ((threadIdx.y % (32 / blockDim.x)) * blockDim.x);
+
+         for (int i = blockDim.x; (i >>= 1);) {
+           Impl::in_place_shfl_down(tmp2, tmp, i, blockDim.x, mask);
+           if ((int)threadIdx.x < i) {
+             reducer.join(tmp, tmp2);
+           }
+         }
+
+         // Broadcast from root lane to all other lanes.
+         // Cannot use "butterfly" algorithm to avoid the broadcast
+         // because floating point summation is not associative
+         // and thus different threads could have different results.
+
+         Impl::in_place_shfl(tmp2, tmp, 0, blockDim.x, mask);
+         value = tmp2; reducer.reference() = tmp2;))
+  }
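+
+  // A worked example of the shuffle mask computed above, assuming
+  // blockDim.x == 8 (a warp holds 32 / 8 == 4 threads' vector lanes)
+  // and threadIdx.y == 2:
+  //
+  //   mask = ((1 << 8) - 1) << ((2 % 4) * 8)
+  //        = 0xff << 16
+  //        = 0x00ff0000   // exactly this thread's eight lanes
+  //
+  // The __syncwarp calls in the vector-level loops later in this file
+  // build the same mask expression.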
+
+  //----------------------------------------
+  // Private for the driver
+
+  KOKKOS_INLINE_FUNCTION
+  CudaTeamMember(void* shared, const size_t shared_begin,
+                 const size_t shared_size, void* scratch_level_1_ptr,
+                 const size_t scratch_level_1_size, const int arg_league_rank,
+                 const int arg_league_size)
+      : m_team_reduce(shared),
+        m_team_shared(static_cast<char*>(shared) + shared_begin, shared_size,
+                      scratch_level_1_ptr, scratch_level_1_size),
+        m_team_reduce_size(shared_begin),
+        m_league_rank(arg_league_rank),
+        m_league_size(arg_league_size) {}
+
+ public:
+  // Declared to avoid unused-private-member warnings, which are triggered
+  // when SFINAE excludes the member function which uses these variables.
+  // Making another class a friend also suppresses these warnings.
+  bool impl_avoid_sfinae_warning() const noexcept {
+    return m_team_reduce_size > 0 && m_team_reduce != nullptr;
+  }
+};
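+
+// Examples of the dimensioning rule documented on CudaTeamMember
+// ( 0 == WarpSize % (blockDim.x * blockDim.y) OR 1 == blockDim.z ):
+//
+//   block = dim3(4,  8, 2)  // 4*8 == 32 fills a warp: 2 warp-teams/block
+//   block = dim3(4, 16, 1)  // the team is the whole 64-thread block
+//   block = dim3(4, 16, 2)  // invalid: 32 % 64 != 0 and blockDim.z != 1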
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename iType>
+struct TeamThreadRangeBoundariesStruct<iType, CudaTeamMember> {
+  using index_type = iType;
+  const CudaTeamMember& member;
+  const iType start;
+  const iType end;
+
+  KOKKOS_INLINE_FUNCTION
+  TeamThreadRangeBoundariesStruct(const CudaTeamMember& thread_, iType count)
+      : member(thread_), start(0), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  TeamThreadRangeBoundariesStruct(const CudaTeamMember& thread_, iType begin_,
+                                  iType end_)
+      : member(thread_), start(begin_), end(end_) {}
+};
+
+template <typename iType>
+struct TeamVectorRangeBoundariesStruct<iType, CudaTeamMember> {
+  using index_type = iType;
+  const CudaTeamMember& member;
+  const iType start;
+  const iType end;
+
+  KOKKOS_INLINE_FUNCTION
+  TeamVectorRangeBoundariesStruct(const CudaTeamMember& thread_,
+                                  const iType& count)
+      : member(thread_), start(0), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  TeamVectorRangeBoundariesStruct(const CudaTeamMember& thread_,
+                                  const iType& begin_, const iType& end_)
+      : member(thread_), start(begin_), end(end_) {}
+};
+
+template <typename iType>
+struct ThreadVectorRangeBoundariesStruct<iType, CudaTeamMember> {
+  using index_type = iType;
+  const index_type start;
+  const index_type end;
+
+  KOKKOS_INLINE_FUNCTION
+  ThreadVectorRangeBoundariesStruct(const CudaTeamMember, index_type count)
+      : start(static_cast<index_type>(0)), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  ThreadVectorRangeBoundariesStruct(index_type count)
+      : start(static_cast<index_type>(0)), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  ThreadVectorRangeBoundariesStruct(const CudaTeamMember, index_type arg_begin,
+                                    index_type arg_end)
+      : start(arg_begin), end(arg_end) {}
+
+  KOKKOS_INLINE_FUNCTION
+  ThreadVectorRangeBoundariesStruct(index_type arg_begin, index_type arg_end)
+      : start(arg_begin), end(arg_end) {}
+};
+
+}  // namespace Impl
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::TeamThreadRangeBoundariesStruct<iType, Impl::CudaTeamMember>
+    TeamThreadRange(const Impl::CudaTeamMember& thread, iType count) {
+  return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::CudaTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::CudaTeamMember>
+TeamThreadRange(const Impl::CudaTeamMember& thread, iType1 begin, iType2 end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::CudaTeamMember>(
+      thread, iType(begin), iType(end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::TeamVectorRangeBoundariesStruct<iType, Impl::CudaTeamMember>
+    TeamVectorRange(const Impl::CudaTeamMember& thread, const iType& count) {
+  return Impl::TeamVectorRangeBoundariesStruct<iType, Impl::CudaTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamVectorRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::CudaTeamMember>
+TeamVectorRange(const Impl::CudaTeamMember& thread, const iType1& begin,
+                const iType2& end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::TeamVectorRangeBoundariesStruct<iType, Impl::CudaTeamMember>(
+      thread, iType(begin), iType(end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::CudaTeamMember>
+    ThreadVectorRange(const Impl::CudaTeamMember& thread, iType count) {
+  return Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::CudaTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::CudaTeamMember>
+ThreadVectorRange(const Impl::CudaTeamMember& thread, iType1 arg_begin,
+                  iType2 arg_end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::CudaTeamMember>(
+      thread, iType(arg_begin), iType(arg_end));
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::ThreadSingleStruct<Impl::CudaTeamMember> PerTeam(
+    const Impl::CudaTeamMember& thread) {
+  return Impl::ThreadSingleStruct<Impl::CudaTeamMember>(thread);
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::VectorSingleStruct<Impl::CudaTeamMember> PerThread(
+    const Impl::CudaTeamMember& thread) {
+  return Impl::VectorSingleStruct<Impl::CudaTeamMember>(thread);
+}
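+
+// A minimal usage sketch of the nested-parallelism interface defined
+// above (the sizes and loop bodies are illustrative, not part of this
+// file):
+//
+//   using member_type = Kokkos::TeamPolicy<Kokkos::Cuda>::member_type;
+//   Kokkos::parallel_for(
+//       Kokkos::TeamPolicy<Kokkos::Cuda>(64 /*league*/, 32 /*team*/,
+//                                        4 /*vector*/),
+//       KOKKOS_LAMBDA(const member_type& team) {
+//         Kokkos::parallel_for(Kokkos::TeamThreadRange(team, 100),
+//                              [&](const int i) {
+//           Kokkos::parallel_for(Kokkos::ThreadVectorRange(team, 8),
+//                                [&](const int j) { /* ... */ });
+//         });
+//         Kokkos::single(Kokkos::PerTeam(team),
+//                        [&]() { /* once per team */ });
+//       });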
+
+//----------------------------------------------------------------------------
+
+/** \brief  Inter-thread parallel_for.
+ *
+ *  Executes closure(iType i) for each i=[0..N).
+ *
+ * The range [0..N) is mapped to all threads of the calling thread team.
+ */
+template <typename iType, class Closure>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::CudaTeamMember>&
+        loop_boundaries,
+    const Closure& closure) {
+  (void)loop_boundaries;
+  (void)closure;
+  KOKKOS_IF_ON_DEVICE(
+      (for (iType i = loop_boundaries.start + threadIdx.y;
+            i < loop_boundaries.end; i += blockDim.y) { closure(i); }))
+}
+
+//----------------------------------------------------------------------------
+
+/** \brief  Inter-thread parallel_reduce with a reducer.
+ *
+ *  Executes closure(iType i, ValueType & val) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all threads of the
+ *  calling thread team and a reduction of val is performed
+ *  with the reducer's join operation; the result is placed
+ *  in the reducer.
+ */
+template <typename iType, class Closure, class ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+                    iType, Impl::CudaTeamMember>& loop_boundaries,
+                const Closure& closure, const ReducerType& reducer) {
+  (void)loop_boundaries;
+  (void)closure;
+  (void)reducer;
+  KOKKOS_IF_ON_DEVICE(
+      (typename ReducerType::value_type value;
+
+       reducer.init(value);
+
+       for (iType i = loop_boundaries.start + threadIdx.y;
+            i < loop_boundaries.end; i += blockDim.y) { closure(i, value); }
+
+       loop_boundaries.member.team_reduce(reducer, value);))
+}
+
+/** \brief  Inter-thread parallel_reduce assuming summation.
+ *
+ *  Executes closure(iType i, ValueType & val) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all threads of the
+ *  calling thread team and a summation of val is
+ *  performed and put into result.
+ */
+template <typename iType, class Closure, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+                    iType, Impl::CudaTeamMember>& loop_boundaries,
+                const Closure& closure, ValueType& result) {
+  (void)loop_boundaries;
+  (void)closure;
+  (void)result;
+  KOKKOS_IF_ON_DEVICE(
+      (ValueType val; Kokkos::Sum<ValueType> reducer(val);
+
+       reducer.init(reducer.reference());
+
+       for (iType i = loop_boundaries.start + threadIdx.y;
+            i < loop_boundaries.end; i += blockDim.y) { closure(i, val); }
+
+       loop_boundaries.member.team_reduce(reducer, val);
+       result = reducer.reference();))
+}
+
+template <typename iType, class Closure>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::TeamVectorRangeBoundariesStruct<iType, Impl::CudaTeamMember>&
+        loop_boundaries,
+    const Closure& closure) {
+  (void)loop_boundaries;
+  (void)closure;
+  KOKKOS_IF_ON_DEVICE((for (iType i = loop_boundaries.start +
+                                      threadIdx.y * blockDim.x + threadIdx.x;
+                            i < loop_boundaries.end;
+                            i += blockDim.y * blockDim.x) { closure(i); }))
+}
+
+template <typename iType, class Closure, class ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
+                    iType, Impl::CudaTeamMember>& loop_boundaries,
+                const Closure& closure, const ReducerType& reducer) {
+  (void)loop_boundaries;
+  (void)closure;
+  (void)reducer;
+  KOKKOS_IF_ON_DEVICE((typename ReducerType::value_type value;
+                       reducer.init(value);
+
+                       for (iType i = loop_boundaries.start +
+                                      threadIdx.y * blockDim.x + threadIdx.x;
+                            i < loop_boundaries.end;
+                            i += blockDim.y * blockDim.x) { closure(i, value); }
+
+                       loop_boundaries.member.vector_reduce(reducer, value);
+                       loop_boundaries.member.team_reduce(reducer, value);))
+}
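+
+// Shape trace for the two-stage TeamVectorRange reduction above,
+// assuming blockDim == dim3(4, 8, 1), i.e. 32 participating lanes:
+//
+//   vector_reduce : 32 lane partials -> 8 per-thread partials (4-lane sums)
+//   team_reduce   : 8 per-thread partials -> 1 team-wide result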
+
+template <typename iType, class Closure, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>
+parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
+                    iType, Impl::CudaTeamMember>& loop_boundaries,
+                const Closure& closure, ValueType& result) {
+  (void)loop_boundaries;
+  (void)closure;
+  (void)result;
+  KOKKOS_IF_ON_DEVICE((ValueType val; Kokkos::Sum<ValueType> reducer(val);
+
+                       reducer.init(reducer.reference());
+
+                       for (iType i = loop_boundaries.start +
+                                      threadIdx.y * blockDim.x + threadIdx.x;
+                            i < loop_boundaries.end;
+                            i += blockDim.y * blockDim.x) { closure(i, val); }
+
+                       loop_boundaries.member.vector_reduce(reducer);
+                       loop_boundaries.member.team_reduce(reducer);
+                       result = reducer.reference();))
+}
+
+//----------------------------------------------------------------------------
+
+/** \brief  Intra-thread vector parallel_for.
+ *
+ *  Executes closure(iType i) for each i=[0..N)
+ *
+ * The range [0..N) is mapped to all vector lanes of the calling thread.
+ */
+template <typename iType, class Closure>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::CudaTeamMember>&
+        loop_boundaries,
+    const Closure& closure) {
+  (void)loop_boundaries;
+  (void)closure;
+  KOKKOS_IF_ON_DEVICE((
+      for (iType i = loop_boundaries.start + threadIdx.x;
+           i < loop_boundaries.end; i += blockDim.x) { closure(i); }
+
+      __syncwarp(blockDim.x == 32
+                     ? 0xffffffff
+                     : ((1 << blockDim.x) - 1)
+                           << (threadIdx.y % (32 / blockDim.x)) * blockDim.x);))
+}
+
+//----------------------------------------------------------------------------
+
+/** \brief  Intra-thread vector parallel_reduce.
+ *
+ *  Calls closure(iType i, ValueType & val) for each i=[0..N).
+ *
+ *  The range [0..N) is mapped to all vector lanes of
+ *  the calling thread and a reduction of val is performed with the
+ *  reducer's join operation; the result is placed in the reducer.
+ */
+template <typename iType, class Closure, class ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+parallel_reduce(Impl::ThreadVectorRangeBoundariesStruct<
+                    iType, Impl::CudaTeamMember> const& loop_boundaries,
+                Closure const& closure, ReducerType const& reducer) {
+  (void)loop_boundaries;
+  (void)closure;
+  (void)reducer;
+  KOKKOS_IF_ON_DEVICE((
+
+      reducer.init(reducer.reference());
+
+      for (iType i = loop_boundaries.start + threadIdx.x;
+           i < loop_boundaries.end;
+           i += blockDim.x) { closure(i, reducer.reference()); }
+
+      Impl::CudaTeamMember::vector_reduce(reducer);
+
+      ))
+}
+
+/** \brief  Intra-thread vector parallel_reduce.
+ *
+ *  Calls closure(iType i, ValueType & val) for each i=[0..N).
+ *
+ *  The range [0..N) is mapped to all vector lanes of
+ *  the calling thread and a reduction of val is performed using +=
+ *  and output into result.
+ *
+ *  The identity value for the += operator is assumed to be the default
+ *  constructed value.
+ */
+template <typename iType, class Closure, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!is_reducer<ValueType>::value>
+parallel_reduce(Impl::ThreadVectorRangeBoundariesStruct<
+                    iType, Impl::CudaTeamMember> const& loop_boundaries,
+                Closure const& closure, ValueType& result) {
+  (void)loop_boundaries;
+  (void)closure;
+  (void)result;
+  KOKKOS_IF_ON_DEVICE(
+      (result = ValueType();
+
+       for (iType i = loop_boundaries.start + threadIdx.x;
+            i < loop_boundaries.end; i += blockDim.x) { closure(i, result); }
+
+       Impl::CudaTeamMember::vector_reduce(Kokkos::Sum<ValueType>(result));
+
+       ))
+}
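+
+// Illustrative usage sketch (x is an assumed View of length n, team an
+// assumed team handle): each vector lane accumulates a partial sum and the
+// overload above combines the lanes with vector_reduce:
+//
+//   double thread_sum = 0.0;
+//   Kokkos::parallel_reduce(Kokkos::ThreadVectorRange(team, n),
+//                           [&](int i, double& val) { val += x(i); },
+//                           thread_sum);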
+
+//----------------------------------------------------------------------------
+
+/** \brief  Inter-thread parallel exclusive prefix sum.
+ *
+ *  Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to each rank in the team (whose global rank is
+ *  less than N) and a scan operation is performed. The last call to closure has
+ *  final == true.
+ */
+// This is the same code as in HIP and largely the same as in OpenMPTarget
+template <typename iType, typename FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::CudaTeamMember>&
+        loop_bounds,
+    const FunctorType& lambda) {
+  // Extract value_type from lambda
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Kokkos::Impl::FunctorPatternInterface::SCAN, void,
+      FunctorType>::value_type;
+
+  const auto start     = loop_bounds.start;
+  const auto end       = loop_bounds.end;
+  auto& member         = loop_bounds.member;
+  const auto team_size = member.team_size();
+  const auto team_rank = member.team_rank();
+  const auto nchunk    = (end - start + team_size - 1) / team_size;
+  value_type accum     = 0;
+  // each team has to process one or more chunks of the prefix scan
+  for (iType i = 0; i < nchunk; ++i) {
+    auto ii = start + i * team_size + team_rank;
+    // local accumulation for this chunk
+    value_type local_accum = 0;
+    // user updates value with prefix value
+    if (ii < loop_bounds.end) lambda(ii, local_accum, false);
+    // perform team scan
+    local_accum = member.team_scan(local_accum);
+    // add this block's accum to the total accumulation
+    auto val = accum + local_accum;
+    // user updates their data with total accumulation
+    if (ii < loop_bounds.end) lambda(ii, val, true);
+    // the last value needs to be propagated to the next chunk
+    if (team_rank == team_size - 1) accum = val;
+    // broadcast last value to rest of the team
+    member.team_broadcast(accum, team_size - 1);
+  }
+}
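+
+// Worked example (illustrative): for start = 0, end = 6, team_size = 4 we
+// get nchunk = 2. With every closure contributing 1, chunk 0 hands the
+// final closure calls the exclusive prefixes {0,1,2,3} and accum becomes 4
+// (the last rank's inclusive value, broadcast to the team); chunk 1 then
+// hands i = 4,5 the prefixes {4,5}.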
+
+//----------------------------------------------------------------------------
+
+/** \brief  Intra-thread vector parallel scan with reducer.
+ *
+ *  Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all vector lanes in the
+ *  thread and a scan operation is performed.
+ *  The last call to closure has final == true.
+ */
+template <typename iType, class Closure, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_scan(const Impl::ThreadVectorRangeBoundariesStruct<
+                  iType, Impl::CudaTeamMember>& loop_boundaries,
+              const Closure& closure, const ReducerType& reducer) {
+  (void)loop_boundaries;
+  (void)closure;
+  (void)reducer;
+  KOKKOS_IF_ON_DEVICE((
+
+      using value_type = typename ReducerType::value_type;
+
+      value_type accum;
+
+      reducer.init(accum);
+
+      const value_type identity = accum;
+
+      // Loop through the boundaries in vector-length chunks;
+      // a scan must be performed at each iteration.
+
+      // All thread "lanes" must loop the same number of times.
+      // Determine a loop end common to all thread "lanes."
+      // Requires:
+      //   blockDim.x is power of two and thus
+      //     ( end % blockDim.x ) == ( end & ( blockDim.x - 1 ) )
+      //   1 <= blockDim.x <= CudaTraits::WarpSize
+
+      const int mask = blockDim.x - 1;
+      const unsigned active_mask =
+          blockDim.x == 32
+              ? 0xffffffff
+              : ((1 << blockDim.x) - 1)
+                    << (threadIdx.y % (32 / blockDim.x)) * blockDim.x;
+      const int rem = loop_boundaries.end & mask;  // == end % blockDim.x
+      const int end = loop_boundaries.end + (rem ? blockDim.x - rem : 0);
+
+      for (int i = threadIdx.x; i < end; i += blockDim.x) {
+        value_type val = identity;
+
+        // First acquire per-lane contributions.
+        // This sets lane t's val to element i-1's contribution
+        // so that the in_place_shfl_up below yields an
+        // exclusive scan -- the final accumulation
+        // of i's val will be included in the second
+        // closure call later.
+        if (i < loop_boundaries.end && threadIdx.x > 0) {
+          closure(i - 1, val, false);
+        }
+
+        // Bottom up exclusive scan in triangular pattern
+        // where each CUDA thread is the root of a reduction tree
+        // from the zeroth "lane" to itself.
+        //  [t] += [t-1] if t >= 1
+        //  [t] += [t-2] if t >= 2
+        //  [t] += [t-4] if t >= 4
+        //  ...
+        //  This differs from the non-reducer overload, where an inclusive scan
+        //  was implemented, because in general the binary operator cannot be
+        //  inverted and we would not be able to remove the inclusive
+        //  contribution by inversion.
+        for (int j = 1; j < (int)blockDim.x; j <<= 1) {
+          value_type tmp = identity;
+          Impl::in_place_shfl_up(tmp, val, j, blockDim.x, active_mask);
+          if (j <= (int)threadIdx.x) {
+            reducer.join(val, tmp);
+          }
+        }
+
+        // Include accumulation
+        reducer.join(val, accum);
+
+        // Update i's contribution into the val
+        // and add it to accum for next round
+        if (i < loop_boundaries.end) closure(i, val, true);
+        Impl::in_place_shfl(accum, val, mask, blockDim.x, active_mask);
+      }
+
+      ))
+}
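+
+// Worked example (illustrative): for blockDim.x = 4 and inputs {a,b,c,d},
+// the first closure call loads the lanes with {id, a, b, c}, where id is
+// the reducer's identity. The shfl_up rounds then produce
+//   j = 1: {id, a, a+b, b+c}
+//   j = 2: {id, a, a+b, a+b+c}
+// i.e. each lane holds the exclusive prefix of its input, to which the
+// final closure call adds the lane's own contribution.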
+
+//----------------------------------------------------------------------------
+
+/** \brief  Intra-thread vector parallel exclusive prefix sum.
+ *
+ *  Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all vector lanes in the
+ *  thread and a scan operation is performed.
+ *  The last call to closure has final == true.
+ */
+template <typename iType, class Closure>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::CudaTeamMember>&
+        loop_boundaries,
+    const Closure& closure) {
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+  value_type dummy;
+  parallel_scan(loop_boundaries, closure, Kokkos::Sum<value_type>(dummy));
+}
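+
+// Illustrative usage sketch (x and y are assumed Views of length n): the
+// closure adds its contribution to val in every pass and may only write
+// output when final == true, at which point val holds the exclusive prefix:
+//
+//   Kokkos::parallel_scan(Kokkos::ThreadVectorRange(team, n),
+//                         [&](int i, float& val, bool final) {
+//                           if (final) y(i) = val;  // exclusive prefix
+//                           val += x(i);
+//                         });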
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<Impl::CudaTeamMember>&,
+    const FunctorType& lambda) {
+  (void)lambda;
+  KOKKOS_IF_ON_DEVICE((
+      if (threadIdx.x == 0) { lambda(); }
+
+      __syncwarp(blockDim.x == 32
+                     ? 0xffffffff
+                     : ((1 << blockDim.x) - 1)
+                           << (threadIdx.y % (32 / blockDim.x)) * blockDim.x);))
+}
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::CudaTeamMember>&,
+    const FunctorType& lambda) {
+  (void)lambda;
+  KOKKOS_IF_ON_DEVICE((
+      if (threadIdx.x == 0 && threadIdx.y == 0) { lambda(); }
+
+      __syncwarp(blockDim.x == 32
+                     ? 0xffffffff
+                     : ((1 << blockDim.x) - 1)
+                           << (threadIdx.y % (32 / blockDim.x)) * blockDim.x);))
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<Impl::CudaTeamMember>&,
+    const FunctorType& lambda, ValueType& val) {
+  (void)lambda;
+  (void)val;
+  KOKKOS_IF_ON_DEVICE(
+      (if (threadIdx.x == 0) { lambda(val); }
+
+       unsigned mask =
+           blockDim.x == 32
+               ? 0xffffffff
+               : ((1 << blockDim.x) - 1)
+                     << ((threadIdx.y % (32 / blockDim.x)) * blockDim.x);
+
+       Impl::in_place_shfl(val, val, 0, blockDim.x, mask);))
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::CudaTeamMember>& single_struct,
+    const FunctorType& lambda, ValueType& val) {
+  (void)single_struct;
+  (void)lambda;
+  (void)val;
+  KOKKOS_IF_ON_DEVICE(
+      (if (threadIdx.x == 0 && threadIdx.y == 0) { lambda(val); }
+
+       single_struct.team_member.team_broadcast(val, 0);))
+}
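+
+// Illustrative usage sketch: PerThread runs the lambda once per thread (on
+// vector lane 0), PerTeam once per team (thread 0, lane 0); the broadcast
+// overloads make the lambda's result visible to all lanes / the whole team:
+//
+//   int chosen;
+//   Kokkos::single(Kokkos::PerTeam(team), [&](int& v) { v = 42; }, chosen);
+//   // every team member now sees chosen == 42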
+
+}  // namespace Kokkos
+
+#endif /* defined(KOKKOS_ENABLE_CUDA) */
+
+#endif /* #ifndef KOKKOS_CUDA_TEAM_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_UniqueToken.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_UniqueToken.hpp
new file mode 100644 (file)
index 0000000..6da2cad
--- /dev/null
@@ -0,0 +1,189 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_UNIQUE_TOKEN_HPP
+#define KOKKOS_CUDA_UNIQUE_TOKEN_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+
+#include <Kokkos_CudaSpace.hpp>
+#include <Kokkos_UniqueToken.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+
+namespace Kokkos {
+
+namespace Impl {
+Kokkos::View<uint32_t*, Kokkos::CudaSpace> cuda_global_unique_token_locks(
+    bool deallocate = false);
+}
+
+namespace Experimental {
+// Both global and instance UniqueTokens are implemented in the same way:
+// the global version has one shared static lock array underneath, but it
+// can't be a static member variable since we need to access it on device,
+// and we share the implementation with the instance version.
+template <>
+class UniqueToken<Cuda, UniqueTokenScope::Global> {
+ protected:
+  Kokkos::View<uint32_t*, Kokkos::CudaSpace> m_locks;
+
+ public:
+  using execution_space = Cuda;
+  using size_type       = int32_t;
+
+  explicit UniqueToken(execution_space const& = Cuda())
+      : m_locks(Kokkos::Impl::cuda_global_unique_token_locks()) {}
+
+ protected:
+  // These are constructors for the Instance version
+  UniqueToken(size_type max_size) {
+    m_locks = Kokkos::View<uint32_t*, Kokkos::CudaSpace>(
+        "Kokkos::UniqueToken::m_locks", max_size);
+  }
+  UniqueToken(size_type max_size, execution_space const& exec) {
+    m_locks = Kokkos::View<uint32_t*, Kokkos::CudaSpace>(
+        Kokkos::view_alloc(exec, "Kokkos::UniqueToken::m_locks"), max_size);
+  }
+
+ public:
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken(const UniqueToken&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken(UniqueToken&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken& operator=(const UniqueToken&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken& operator=(UniqueToken&&) = default;
+
+  /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  size_type size() const noexcept { return m_locks.extent(0); }
+
+ private:
+  __device__ size_type impl_acquire() const {
+    int idx = blockIdx.x * (blockDim.x * blockDim.y) +
+              threadIdx.y * blockDim.x + threadIdx.x;
+    idx = idx % size();
+#if defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_PASCAL) || \
+    defined(KOKKOS_ARCH_MAXWELL)
+    unsigned int mask        = __activemask();
+    unsigned int active      = __ballot_sync(mask, 1);
+    unsigned int done_active = 0;
+    bool done                = false;
+    while (active != done_active) {
+      if (!done) {
+        if (Kokkos::atomic_compare_exchange(&m_locks(idx), 0, 1) == 0) {
+          done = true;
+        } else {
+          idx += blockDim.y * blockDim.x + 1;
+          idx = idx % size();
+        }
+      }
+      done_active = __ballot_sync(mask, done ? 1 : 0);
+    }
+#else
+    while (Kokkos::atomic_compare_exchange(&m_locks(idx), 0, 1) == 1) {
+      idx += blockDim.y * blockDim.x + 1;
+      idx = idx % size();
+    }
+#endif
+// Make sure that all writes in the previous lock owner are visible to me
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+    desul::atomic_thread_fence(desul::MemoryOrderAcquire(),
+                               desul::MemoryScopeDevice());
+#else
+    Kokkos::memory_fence();
+#endif
+    return idx;
+  }
+
+ public:
+  /// \brief acquire value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  size_type acquire() const {
+    KOKKOS_IF_ON_DEVICE(return impl_acquire();)
+    KOKKOS_IF_ON_HOST(return 0;)
+  }
+
+  /// \brief release an acquired value
+  KOKKOS_INLINE_FUNCTION
+  void release(size_type idx) const noexcept {
+// Make sure my writes are visible to the next lock owner
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+    desul::atomic_thread_fence(desul::MemoryOrderRelease(),
+                               desul::MemoryScopeDevice());
+#else
+    Kokkos::memory_fence();
+#endif
+    (void)Kokkos::atomic_exchange(&m_locks(idx), 0);
+  }
+};
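+
+// Illustrative usage sketch (scratch is an assumed View sized token.size()):
+// a token leases a slot that the calling thread may use exclusively until
+// it is released:
+//
+//   Kokkos::Experimental::UniqueToken<Kokkos::Cuda> token;
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<Kokkos::Cuda>(0, n), KOKKOS_LAMBDA(int i) {
+//         const int id = token.acquire();
+//         scratch(id) += 1;  // exclusive use of slot id
+//         token.release(id);
+//       });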
+
+template <>
+class UniqueToken<Cuda, UniqueTokenScope::Instance>
+    : public UniqueToken<Cuda, UniqueTokenScope::Global> {
+ public:
+  // The instance version will forward to protected constructor which creates
+  // a lock array per instance
+  UniqueToken()
+      : UniqueToken<Cuda, UniqueTokenScope::Global>(
+            Kokkos::Cuda().concurrency()) {}
+  explicit UniqueToken(execution_space const& arg)
+      : UniqueToken<Cuda, UniqueTokenScope::Global>(
+            Kokkos::Cuda().concurrency(), arg) {}
+  explicit UniqueToken(size_type max_size)
+      : UniqueToken<Cuda, UniqueTokenScope::Global>(max_size) {}
+  UniqueToken(size_type max_size, execution_space const& arg)
+      : UniqueToken<Cuda, UniqueTokenScope::Global>(max_size, arg) {}
+};
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif  // KOKKOS_ENABLE_CUDA
+#endif  // KOKKOS_CUDA_UNIQUE_TOKEN_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Vectorization.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_Vectorization.hpp
new file mode 100644 (file)
index 0000000..d3d8814
--- /dev/null
@@ -0,0 +1,236 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+#ifndef KOKKOS_CUDA_VECTORIZATION_HPP
+#define KOKKOS_CUDA_VECTORIZATION_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+
+#include <type_traits>
+
+#if !defined(KOKKOS_COMPILER_CLANG)
+#define KOKKOS_IMPL_CUDA_MAX_SHFL_SIZEOF sizeof(long long)
+#else
+#define KOKKOS_IMPL_CUDA_MAX_SHFL_SIZEOF sizeof(int)
+#endif
+
+namespace Kokkos {
+
+namespace Impl {
+
+// Include all lanes
+constexpr unsigned shfl_all_mask = 0xffffffffu;
+
+//----------------------------------------------------------------------------
+// Shuffle operations require input to be a register (stack) variable
+
+// Derived implements do_shfl_op(unsigned mask, T& in, int lane, int width),
+// which forwards to one of __shfl_sync, __shfl_up_sync, or __shfl_down_sync.
+// Since the logic with respect to value sizes, etc., is the same everywhere,
+// we put it all in one place.
+template <class Derived>
+struct in_place_shfl_op {
+  // CRTP boilerplate
+  __device__ KOKKOS_IMPL_FORCEINLINE const Derived& self() const noexcept {
+    return *static_cast<Derived const*>(this);
+  }
+
+  // sizeof(Scalar) <= sizeof(int) case
+  template <class Scalar>
+  // requires _assignable_from_bits<Scalar>
+  __device__ inline std::enable_if_t<sizeof(Scalar) <= sizeof(int)> operator()(
+      Scalar& out, Scalar const& in, int lane_or_delta, int width,
+      unsigned mask = shfl_all_mask) const noexcept {
+    using shfl_type = int;
+    union conv_type {
+      Scalar orig;
+      shfl_type conv;
+      // This should be fine, members get explicitly reset, which changes the
+      // active member
+      KOKKOS_FUNCTION conv_type() { conv = 0; }
+    };
+    conv_type tmp_in;
+    tmp_in.orig = in;
+    shfl_type tmp_out;
+    tmp_out = reinterpret_cast<shfl_type&>(tmp_in.orig);
+    conv_type res;
+    //------------------------------------------------
+    res.conv = self().do_shfl_op(mask, tmp_out, lane_or_delta, width);
+    //------------------------------------------------
+    out = reinterpret_cast<Scalar&>(res.conv);
+  }
+
+// TODO: figure out why 64-bit shfl fails in Clang
+#if !defined(KOKKOS_COMPILER_CLANG)
+  // sizeof(Scalar) == sizeof(double) case
+  // requires _assignable_from_bits<Scalar>
+  template <class Scalar>
+  __device__ inline std::enable_if_t<sizeof(Scalar) == sizeof(double)>
+  operator()(Scalar& out, Scalar const& in, int lane_or_delta, int width,
+             unsigned mask = shfl_all_mask) const noexcept {
+    //------------------------------------------------
+    reinterpret_cast<double&>(out) = self().do_shfl_op(
+        mask, *reinterpret_cast<double const*>(&in), lane_or_delta, width);
+    //------------------------------------------------
+  }
+#else
+  // sizeof(Scalar) == sizeof(double) case
+  // requires _assignable_from_bits<Scalar>
+  template <typename Scalar>
+  __device__ inline std::enable_if_t<sizeof(Scalar) == sizeof(double)>
+  operator()(Scalar& out, const Scalar& val, int lane_or_delta, int width,
+             unsigned mask = shfl_all_mask) const noexcept {
+    //------------------------------------------------
+    int lo   = __double2loint(*reinterpret_cast<const double*>(&val));
+    int hi   = __double2hiint(*reinterpret_cast<const double*>(&val));
+    lo       = self().do_shfl_op(mask, lo, lane_or_delta, width);
+    hi       = self().do_shfl_op(mask, hi, lane_or_delta, width);
+    auto tmp = __hiloint2double(hi, lo);
+    out      = reinterpret_cast<Scalar&>(tmp);
+    //------------------------------------------------
+  }
+#endif
+
+  // sizeof(Scalar) > sizeof(double) case
+  template <typename Scalar>
+  __device__ inline std::enable_if_t<(sizeof(Scalar) > sizeof(double))>
+  operator()(Scalar& out, const Scalar& val, int lane_or_delta, int width,
+             unsigned mask = shfl_all_mask) const noexcept {
+    // TODO DSH shouldn't this be KOKKOS_IMPL_CUDA_MAX_SHFL_SIZEOF instead of
+    //      sizeof(int)? (Need benchmarks to decide which is faster)
+    using shuffle_as_t = int;
+    enum : int { N = sizeof(Scalar) / sizeof(shuffle_as_t) };
+
+    for (int i = 0; i < N; ++i) {
+      reinterpret_cast<shuffle_as_t*>(&out)[i] = self().do_shfl_op(
+          mask, reinterpret_cast<shuffle_as_t const*>(&val)[i], lane_or_delta,
+          width);
+    }
+  }
+};
+
+struct in_place_shfl_fn : in_place_shfl_op<in_place_shfl_fn> {
+  template <class T>
+  __device__ KOKKOS_IMPL_FORCEINLINE T do_shfl_op(unsigned mask, T& val,
+                                                  int lane, int width) const
+      noexcept {
+    (void)mask;
+    (void)val;
+    (void)lane;
+    (void)width;
+    return __shfl_sync(mask, val, lane, width);
+  }
+};
+template <class... Args>
+__device__ KOKKOS_IMPL_FORCEINLINE void in_place_shfl(Args&&... args) noexcept {
+  in_place_shfl_fn{}((Args &&) args...);
+}
+
+struct in_place_shfl_up_fn : in_place_shfl_op<in_place_shfl_up_fn> {
+  template <class T>
+  __device__ KOKKOS_IMPL_FORCEINLINE T do_shfl_op(unsigned mask, T& val,
+                                                  int lane, int width) const
+      noexcept {
+    return __shfl_up_sync(mask, val, lane, width);
+  }
+};
+template <class... Args>
+__device__ KOKKOS_IMPL_FORCEINLINE void in_place_shfl_up(
+    Args&&... args) noexcept {
+  in_place_shfl_up_fn{}((Args &&) args...);
+}
+
+struct in_place_shfl_down_fn : in_place_shfl_op<in_place_shfl_down_fn> {
+  template <class T>
+  __device__ KOKKOS_IMPL_FORCEINLINE T do_shfl_op(unsigned mask, T& val,
+                                                  int lane, int width) const
+      noexcept {
+    (void)mask;
+    (void)val;
+    (void)lane;
+    (void)width;
+    return __shfl_down_sync(mask, val, lane, width);
+  }
+};
+template <class... Args>
+__device__ KOKKOS_IMPL_FORCEINLINE void in_place_shfl_down(
+    Args&&... args) noexcept {
+  in_place_shfl_down_fn{}((Args &&) args...);
+}
+
+}  // namespace Impl
+
+template <class T>
+// requires default_constructible<T> && _assignable_from_bits<T>
+__device__ inline T shfl(const T& val, const int& srcLane, const int& width,
+                         unsigned mask = Impl::shfl_all_mask) {
+  T rv = {};
+  Impl::in_place_shfl(rv, val, srcLane, width, mask);
+  return rv;
+}
+
+template <class T>
+// requires default_constructible<T> && _assignable_from_bits<T>
+__device__ inline T shfl_down(const T& val, int delta, int width,
+                              unsigned mask = Impl::shfl_all_mask) {
+  T rv = {};
+  Impl::in_place_shfl_down(rv, val, delta, width, mask);
+  return rv;
+}
+
+template <class T>
+// requires default_constructible<T> && _assignable_from_bits<T>
+__device__ inline T shfl_up(const T& val, int delta, int width,
+                            unsigned mask = Impl::shfl_all_mask) {
+  T rv = {};
+  Impl::in_place_shfl_up(rv, val, delta, width, mask);
+  return rv;
+}
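+
+// Illustrative usage sketch (device code, full-warp mask, warp width 32):
+//
+//   double v     = static_cast<double>(threadIdx.x);  // per-lane value
+//   double lead  = Kokkos::shfl(v, 0, 32);       // every lane gets lane 0's v
+//   double right = Kokkos::shfl_down(v, 1, 32);  // lane t gets lane t+1's v
+//   double left  = Kokkos::shfl_up(v, 1, 32);    // lane t gets lane t-1's v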
+
+}  // end namespace Kokkos
+
+#undef KOKKOS_IMPL_CUDA_MAX_SHFL_SIZEOF
+
+#endif  // defined( KOKKOS_ENABLE_CUDA )
+#endif  // !defined( KOKKOS_CUDA_VECTORIZATION_HPP )
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_View.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_View.hpp
new file mode 100644 (file)
index 0000000..a175820
--- /dev/null
@@ -0,0 +1,303 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_EXPERIMENTAL_CUDA_VIEW_HPP
+#define KOKKOS_EXPERIMENTAL_CUDA_VIEW_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_CUDA)
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+// Cuda texture fetches can be performed for 4-, 8- and 16-byte objects
+// (int, int2, int4). Via reinterpret_cast this can be used to support all
+// scalar types of those sizes. Any other scalar type falls back to either
+// normal reads out of global memory, or to the __ldg intrinsic on Kepler
+// GPUs or newer (Compute Capability >= 3.0).
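+
+// Illustrative sketch: this path is selected for const, random-access Views
+// in CudaSpace, e.g.
+//
+//   Kokkos::View<const float*, Kokkos::CudaSpace,
+//                Kokkos::MemoryTraits<Kokkos::RandomAccess>> x_ra = x;
+//
+// Reads of x_ra(i) then go through tex1Dfetch (or __ldg, see below) instead
+// of plain global loads.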
+
+template <typename ValueType, typename AliasType>
+struct CudaTextureFetch {
+  ::cudaTextureObject_t m_obj;
+  const ValueType* m_ptr;
+  int m_offset;
+
+  // Dereference operator pulls the value through the texture object and
+  // returns it by value
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION ValueType operator[](const iType& i) const {
+    KOKKOS_IF_ON_DEVICE(
+        (AliasType v = tex1Dfetch<AliasType>(m_obj, i + m_offset);
+         return *(reinterpret_cast<ValueType*>(&v));))
+    KOKKOS_IF_ON_HOST((return m_ptr[i];))
+  }
+
+  // Pointer to referenced memory
+  KOKKOS_INLINE_FUNCTION
+  operator const ValueType*() const { return m_ptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  CudaTextureFetch() : m_obj(), m_ptr(), m_offset() {}
+
+  KOKKOS_DEFAULTED_FUNCTION
+  ~CudaTextureFetch() = default;
+
+  KOKKOS_INLINE_FUNCTION
+  CudaTextureFetch(const CudaTextureFetch& rhs)
+      : m_obj(rhs.m_obj), m_ptr(rhs.m_ptr), m_offset(rhs.m_offset) {}
+
+  KOKKOS_INLINE_FUNCTION
+  CudaTextureFetch(CudaTextureFetch&& rhs)
+      : m_obj(rhs.m_obj), m_ptr(rhs.m_ptr), m_offset(rhs.m_offset) {}
+
+  KOKKOS_INLINE_FUNCTION
+  CudaTextureFetch& operator=(const CudaTextureFetch& rhs) {
+    m_obj    = rhs.m_obj;
+    m_ptr    = rhs.m_ptr;
+    m_offset = rhs.m_offset;
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  CudaTextureFetch& operator=(CudaTextureFetch&& rhs) {
+    m_obj    = rhs.m_obj;
+    m_ptr    = rhs.m_ptr;
+    m_offset = rhs.m_offset;
+    return *this;
+  }
+
+  // Texture object spans the entire allocation.
+  // This handle may view a subset of the allocation, so an offset is required.
+  template <class CudaMemorySpace>
+  inline explicit CudaTextureFetch(
+      const ValueType* const arg_ptr,
+      Kokkos::Impl::SharedAllocationRecord<CudaMemorySpace, void>* record)
+      : m_obj(record->template attach_texture_object<AliasType>()),
+        m_ptr(arg_ptr),
+        m_offset(record->attach_texture_object_offset(
+            reinterpret_cast<const AliasType*>(arg_ptr))) {}
+
+  // Texture object spans the entire allocation.
+  // This handle may view a subset of the allocation, so an offset is required.
+  KOKKOS_INLINE_FUNCTION
+  CudaTextureFetch(const CudaTextureFetch& rhs, size_t offset)
+      : m_obj(rhs.m_obj),
+        m_ptr(rhs.m_ptr + offset),
+        m_offset(offset + rhs.m_offset) {}
+};
+
+#if defined(KOKKOS_ENABLE_CUDA_LDG_INTRINSIC)
+
+template <typename ValueType, typename AliasType>
+struct CudaLDGFetch {
+  const ValueType* m_ptr;
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION ValueType operator[](const iType& i) const {
+#if defined(KOKKOS_ARCH_KEPLER30) || defined(KOKKOS_ARCH_KEPLER32)
+    return m_ptr[i];
+#else
+    KOKKOS_IF_ON_DEVICE(
+        (AliasType v = __ldg(reinterpret_cast<const AliasType*>(&m_ptr[i]));
+         return *(reinterpret_cast<ValueType*>(&v));))
+    KOKKOS_IF_ON_HOST((return m_ptr[i];))
+#endif
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  operator const ValueType*() const { return m_ptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  CudaLDGFetch() : m_ptr() {}
+
+  KOKKOS_DEFAULTED_FUNCTION
+  ~CudaLDGFetch() = default;
+
+  KOKKOS_INLINE_FUNCTION
+  CudaLDGFetch(const CudaLDGFetch& rhs) : m_ptr(rhs.m_ptr) {}
+
+  KOKKOS_INLINE_FUNCTION
+  CudaLDGFetch(CudaLDGFetch&& rhs) : m_ptr(rhs.m_ptr) {}
+
+  KOKKOS_INLINE_FUNCTION
+  CudaLDGFetch& operator=(const CudaLDGFetch& rhs) {
+    m_ptr = rhs.m_ptr;
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  CudaLDGFetch& operator=(CudaLDGFetch&& rhs) {
+    m_ptr = rhs.m_ptr;
+    return *this;
+  }
+
+  template <class CudaMemorySpace>
+  inline explicit CudaLDGFetch(
+      const ValueType* const arg_ptr,
+      Kokkos::Impl::SharedAllocationRecord<CudaMemorySpace, void>*)
+      : m_ptr(arg_ptr) {}
+
+  KOKKOS_INLINE_FUNCTION
+  CudaLDGFetch(CudaLDGFetch const rhs, size_t offset)
+      : m_ptr(rhs.m_ptr + offset) {}
+};
+
+#endif
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+/** \brief  Replace Default ViewDataHandle with Cuda texture fetch
+ * specialization if 'const' value type, CudaSpace and random access.
+ */
+template <class Traits>
+class ViewDataHandle<
+    Traits, std::enable_if_t<(
+                // Is Cuda memory space
+                (std::is_same<typename Traits::memory_space,
+                              Kokkos::CudaSpace>::value ||
+                 std::is_same<typename Traits::memory_space,
+                              Kokkos::CudaUVMSpace>::value) &&
+                // Is a trivial const value of 4, 8, or 16 bytes
+                std::is_trivial<typename Traits::const_value_type>::value &&
+                std::is_same<typename Traits::const_value_type,
+                             typename Traits::value_type>::value &&
+                (sizeof(typename Traits::const_value_type) == 4 ||
+                 sizeof(typename Traits::const_value_type) == 8 ||
+                 sizeof(typename Traits::const_value_type) == 16) &&
+                // Random access trait
+                (Traits::memory_traits::is_random_access != 0))>> {
+ public:
+  using track_type = Kokkos::Impl::SharedAllocationTracker;
+
+  using value_type  = typename Traits::const_value_type;
+  using return_type = typename Traits::const_value_type;  // NOT a reference
+
+  using alias_type = std::conditional_t<
+      (sizeof(value_type) == 4), int,
+      std::conditional_t<
+          (sizeof(value_type) == 8), ::int2,
+          std::conditional_t<(sizeof(value_type) == 16), ::int4, void>>>;
+
+#if defined(KOKKOS_ENABLE_CUDA_LDG_INTRINSIC)
+  using handle_type = Kokkos::Impl::CudaLDGFetch<value_type, alias_type>;
+#else
+  using handle_type = Kokkos::Impl::CudaTextureFetch<value_type, alias_type>;
+#endif
+
+  KOKKOS_INLINE_FUNCTION
+  static handle_type const& assign(handle_type const& arg_handle,
+                                   track_type const& /* arg_tracker */) {
+    return arg_handle;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static handle_type const assign(handle_type const& arg_handle,
+                                  size_t offset) {
+    return handle_type(arg_handle, offset);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static handle_type assign(value_type* arg_data_ptr,
+                            track_type const& arg_tracker) {
+    if (arg_data_ptr == nullptr) return handle_type();
+
+#if !defined(KOKKOS_ENABLE_CUDA_LDG_INTRINSIC)
+    KOKKOS_IF_ON_HOST((
+        // Assignment of texture = non-texture requires creation of a texture
+        // object which can only occur on the host.  In addition, 'get_record'
+        // is only valid if called in a host execution space
+
+        using memory_space = typename Traits::memory_space;
+        using record =
+            typename Impl::SharedAllocationRecord<memory_space, void>;
+
+        record* const r = arg_tracker.template get_record<memory_space>();
+
+        if (0 == r) {
+          Kokkos::abort(
+              "Cuda const random access View using Cuda texture memory "
+              "requires "
+              "Kokkos to allocate the View's memory");
+        }
+
+        return handle_type(arg_data_ptr, r);))
+#else
+    KOKKOS_IF_ON_HOST((
+        // Assignment of texture = non-texture requires creation of a texture
+        // object which can only occur on the host.  In addition, 'get_record'
+        // is only valid if called in a host execution space
+
+        using memory_space = typename Traits::memory_space;
+        using record =
+            typename Impl::SharedAllocationRecord<memory_space, void>;
+
+        record* const r = arg_tracker.template get_record<memory_space>();
+
+        return handle_type(arg_data_ptr, r);))
+#endif
+
+    KOKKOS_IF_ON_DEVICE(
+        ((void)arg_tracker; Kokkos::Impl::cuda_abort(
+             "Cannot create Cuda texture object from within a Cuda kernel");
+         return handle_type();))
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_CUDA ) */
+#endif /* #ifndef KOKKOS_CUDA_VIEW_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_WorkGraphPolicy.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_WorkGraphPolicy.hpp
new file mode 100644 (file)
index 0000000..fb3a6b1
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_WORKGRAPHPOLICY_HPP
+#define KOKKOS_CUDA_WORKGRAPHPOLICY_HPP
+
+#include <Kokkos_Cuda.hpp>
+#include <Cuda/Kokkos_Cuda_KernelLaunch.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
+                  Kokkos::Cuda> {
+ public:
+  using Policy = Kokkos::WorkGraphPolicy<Traits...>;
+  using Self   = ParallelFor<FunctorType, Policy, Kokkos::Cuda>;
+
+ private:
+  Policy m_policy;
+  FunctorType m_functor;
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_one(
+      const std::int32_t w) const noexcept {
+    m_functor(w);
+  }
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_one(
+      const std::int32_t w) const noexcept {
+    const TagType t{};
+    m_functor(t, w);
+  }
+
+ public:
+  Policy const& get_policy() const { return m_policy; }
+
+  __device__ inline void operator()() const noexcept {
+    if (0 == (threadIdx.y % 16)) {
+      // Spin until COMPLETED_TOKEN.
+      // END_TOKEN indicates no work is currently available.
+
+      for (std::int32_t w = Policy::END_TOKEN;
+           Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
+        if (Policy::END_TOKEN != w) {
+          exec_one<typename Policy::work_tag>(w);
+          m_policy.completed_work(w);
+        }
+      }
+    }
+  }
+
+  inline void execute() {
+    const int warps_per_block = 4;
+    const dim3 grid(Kokkos::Impl::cuda_internal_multiprocessor_count(), 1, 1);
+    const dim3 block(1, Kokkos::Impl::CudaTraits::WarpSize, warps_per_block);
+    const int shared = 0;
+
+    Kokkos::Impl::CudaParallelLaunch<Self>(
+        *this, grid, block, shared, Cuda().impl_internal_space_instance(),
+        false);
+  }
+
+  inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_policy(arg_policy), m_functor(arg_functor) {}
+};
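+
+// Illustrative sketch of the token protocol implemented by operator() above
+// (a pseudo-trace under assumed names, not the actual Policy implementation):
+// pop_work() returns either a ready work-item index, END_TOKEN when nothing
+// is ready yet (dependencies still pending), or COMPLETED_TOKEN once the
+// whole graph is drained; completed_work(w) marks w done and may make its
+// successors ready:
+//
+//   while ((w = queue.pop_work()) != COMPLETED_TOKEN)
+//     if (w != END_TOKEN) { run(w); queue.completed_work(w); }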
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif /* #define KOKKOS_CUDA_WORKGRAPHPOLICY_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_abort.hpp b/bundled/kokkos-3.7.00/core/src/Cuda/Kokkos_Cuda_abort.hpp
new file mode 100644 (file)
index 0000000..c0daa27
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_ABORT_HPP
+#define KOKKOS_CUDA_ABORT_HPP
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_CUDA)
+
+#include <cuda.h>
+
+extern "C" {
+/*  Cuda runtime function, declared in <crt/device_runtime.h>
+ *  Requires capability 2.x or better.
+ */
+extern __device__ void __assertfail(const void *message, const void *file,
+                                    unsigned int line, const void *function,
+                                    size_t charsize);
+}
+
+namespace Kokkos {
+namespace Impl {
+
+#if !defined(__APPLE__)
+// required to work around failures in random number generator unit tests
+// with pre-Volta architectures
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+__device__ inline void cuda_abort(const char *const message) {
+#else
+[[noreturn]] __device__ inline void cuda_abort(const char *const message) {
+#endif
+  const char empty[] = "";
+
+  __assertfail((const void *)message, (const void *)empty, (unsigned int)0,
+               (const void *)empty, sizeof(char));
+
+  // This loop is never executed. It's intended to suppress warnings that the
+  // function returns, even though it does not. This is necessary because
+  // __assertfail is not marked as [[noreturn]], even though it does not return.
+  //  Disable with KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK to work around failures
+  //  in random number generator unit tests with pre-Volta architectures.
+#if !defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+  while (true)
+    ;
+#endif
+}
+#else
+__device__ inline void cuda_abort(const char *const message) {
+  // __assertfail is not supported on macOS
+}
+#endif
+
+}  // namespace Impl
+}  // namespace Kokkos
+#else
+void KOKKOS_CORE_SRC_CUDA_ABORT_PREVENT_LINK_ERROR() {}
+#endif /* #if defined( KOKKOS_ENABLE_CUDA ) */
+#endif /* #ifndef KOKKOS_CUDA_ABORT_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Abort.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Abort.hpp
new file mode 100644 (file)
index 0000000..dcc5863
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_ABORT_HPP
+#define KOKKOS_HIP_ABORT_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_HIP)
+
+#include <hip/hip_runtime.h>
+
+// FIXME_HIP ROCm 4.5 version header include would be <rocm/rocm_version.h>
+#if __has_include(<rocm_version.h>)
+#include <rocm_version.h>
+#define KOKKOS_IMPL_ROCM_VERSION \
+  ROCM_VERSION_MAJOR * 10000 + ROCM_VERSION_MINOR * 100 + ROCM_VERSION_PATCH
+#endif
+
+// FIXME_HIP workaround for ROCm version less than 5.0.2
+#if KOKKOS_IMPL_ROCM_VERSION < 50002
+#define KOKKOS_IMPL_HIP_ABORT_DOES_NOT_PRINT_MESSAGE
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+// The two keywords below are not contradictory. `noinline` is a
+// directive to the optimizer.
+[[noreturn]] __device__ __attribute__((noinline)) inline void hip_abort(
+    char const *msg) {
+  const char empty[] = "";
+  __assert_fail(msg, empty, 0, empty);
+  // This loop is never executed. It's intended to suppress warnings that the
+  // function returns, even though it does not. This is necessary because
+  // abort() is not marked as [[noreturn]], even though it does not return.
+  while (true)
+    ;
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Atomic.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Atomic.hpp
new file mode 100644 (file)
index 0000000..88bcab6
--- /dev/null
@@ -0,0 +1,618 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_ATOMIC_HPP
+#define KOKKOS_HIP_ATOMIC_HPP
+
+#include <impl/Kokkos_Atomic_Memory_Order.hpp>
+#include <impl/Kokkos_Memory_Fence.hpp>
+#include <HIP/Kokkos_HIP_Locks.hpp>
+
+#if defined(KOKKOS_ENABLE_HIP_ATOMICS)
+namespace Kokkos {
+// HIP provides native atomics for the types int/unsigned int in the
+// following variants:
+// atomic_exchange/compare_exchange/fetch_add/fetch_sub/fetch_max/fetch_min/fetch_and/fetch_or/fetch_xor/fetch_inc/fetch_dec
+
+// atomic_exchange -------------------------------------------------------------
+
+__inline__ __device__ int atomic_exchange(volatile int *const dest,
+                                          const int val) {
+  return atomicExch(const_cast<int *>(dest), val);
+}
+
+__inline__ __device__ unsigned int atomic_exchange(
+    volatile unsigned int *const dest, const unsigned int val) {
+  return atomicExch(const_cast<unsigned int *>(dest), val);
+}
+
+__inline__ __device__ unsigned long long int atomic_exchange(
+    volatile unsigned long long int *const dest,
+    const unsigned long long int val) {
+  return atomicExch(const_cast<unsigned long long *>(dest), val);
+}
+
+__inline__ __device__ float atomic_exchange(volatile float *const dest,
+                                            const float val) {
+  return atomicExch(const_cast<float *>(dest), val);
+}
+
+template <typename T>
+__inline__ __device__ T
+atomic_exchange(volatile T *const dest,
+                std::enable_if_t<sizeof(T) == sizeof(int), const T &> val) {
+  int tmp = atomicExch(reinterpret_cast<int *>(const_cast<T *>(dest)),
+                       *reinterpret_cast<int *>(const_cast<T *>(&val)));
+  return reinterpret_cast<T &>(tmp);
+}
+
+template <typename T>
+__inline__ __device__ T atomic_exchange(
+    volatile T *const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) &&
+                         sizeof(T) == sizeof(unsigned long long int),
+                     const T &>
+        val) {
+  using type = unsigned long long int;
+
+  type tmp = atomicExch(reinterpret_cast<type *>(const_cast<T *>(dest)),
+                        *reinterpret_cast<type *>(const_cast<T *>(&val)));
+  return reinterpret_cast<T &>(tmp);
+}
+
+template <typename T>
+__inline__ __device__ T atomic_exchange(
+    volatile T *const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long long),
+                     const T> &val) {
+  T return_val;
+  int done                 = 0;
+  unsigned int active      = __ballot(1);
+  unsigned int done_active = 0;
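+  // All lanes that entered this function vote via __ballot and keep
+  // iterating until every lane reports done: at most one lane at a time
+  // wins the per-address lock, and exiting the loop early would hang peer
+  // lanes that execute in lock-step.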
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_hip_space((void *)dest)) {
+        return_val = *dest;
+        *dest      = val;
+        Impl::unlock_address_hip_space((void *)dest);
+        done = 1;
+      }
+    }
+    done_active = __ballot(done);
+  }
+  return return_val;
+}
+
+// atomic_assign ---------------------------------------------------------------
+
+template <typename T>
+__inline__ __device__ void atomic_assign(
+    volatile T *const dest,
+    std::enable_if_t<sizeof(T) == sizeof(int), const T &> val) {
+  atomicExch(reinterpret_cast<int *>(const_cast<T *>(dest)),
+             *reinterpret_cast<int *>(const_cast<T *>(&val)));
+}
+
+template <typename T>
+__inline__ __device__ void atomic_assign(
+    volatile T *const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) &&
+                         sizeof(T) == sizeof(unsigned long long int),
+                     const T &>
+        val) {
+  using type = unsigned long long int;
+  atomicExch(reinterpret_cast<type *>(const_cast<T *>(dest)),
+             *reinterpret_cast<type *>(const_cast<T *>(&val)));
+}
+
+template <typename T>
+__inline__ __device__ void atomic_assign(
+    volatile T *const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) &&
+                         sizeof(T) != sizeof(unsigned long long int),
+                     const T &>
+        val) {
+  atomic_exchange(dest, val);
+}
+
+// atomic_compare_exchange -----------------------------------------------------
+
+inline __device__ int atomic_compare_exchange(volatile int *dest, int compare,
+                                              const int &val) {
+  return atomicCAS(const_cast<int *>(dest), compare, val);
+}
+
+inline __device__ unsigned int atomic_compare_exchange(
+    volatile unsigned int *dest, unsigned int compare,
+    const unsigned int &val) {
+  return atomicCAS(const_cast<unsigned int *>(dest), compare, val);
+}
+
+inline __device__ unsigned long long int atomic_compare_exchange(
+    volatile unsigned long long int *dest, unsigned long long int compare,
+    const unsigned long long int &val) {
+  return atomicCAS(const_cast<unsigned long long int *>(dest), compare, val);
+}
+
+template <class T>
+__inline__ __device__ T atomic_compare_exchange(
+    volatile T *dest, T compare,
+    std::enable_if_t<sizeof(T) == sizeof(int), const T &> val) {
+  // FIXME_HIP UB
+  union U {
+    int i;
+    T f;
+    __inline__ __device__ U() {}
+  } idest, icompare, ival;
+  icompare.f = compare;
+  ival.f     = val;
+  idest.i    = atomicCAS(reinterpret_cast<int *>(const_cast<T *>(dest)),
+                      icompare.i, ival.i);
+  return idest.f;
+}
+
+template <class T>
+__inline__ __device__ T atomic_compare_exchange(
+    volatile T *dest, T compare,
+    std::enable_if_t<sizeof(T) == sizeof(unsigned long long int), const T &>
+        val) {
+  // FIXME_HIP UB
+  union U {
+    unsigned long long int i;
+    T f;
+    __inline__ __device__ U() {}
+  } idest, icompare, ival;
+  icompare.f = compare;
+  ival.f     = val;
+  idest.i    = atomicCAS(
+      reinterpret_cast<unsigned long long int *>(const_cast<T *>(dest)),
+      icompare.i, ival.i);
+  return idest.f;
+}
+
+template <typename T>
+__inline__ __device__ T atomic_compare_exchange(
+    volatile T *const dest, const T &compare,
+    std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long long),
+                     const T> &val) {
+  T return_val;
+  int done                 = 0;
+  unsigned int active      = __ballot(1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_hip_space((void *)dest)) {
+        return_val = *dest;
+        if (return_val == compare) *dest = val;
+        Impl::unlock_address_hip_space((void *)dest);
+        done = 1;
+      }
+    }
+    done_active = __ballot(done);
+  }
+  return return_val;
+}
+
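+// Illustrative sketch (editorial, not in the bundled file): any
+// read-modify-write can be layered on the atomic_compare_exchange overloads
+// above with the usual retry loop; a hypothetical fetch-multiply for float:
+//
+//   __device__ float atomic_fetch_mul_sketch(volatile float *dest, float val) {
+//     float oldval = *dest, assume;
+//     do {
+//       assume = oldval;
+//       oldval = Kokkos::atomic_compare_exchange(dest, assume, assume * val);
+//     } while (assume != oldval);  // another thread intervened; retry
+//     return oldval;
+//   }
+//
+// (A production version would compare bit patterns instead of float values so
+// the loop also terminates for NaN payloads.)
+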
+// atomic_fetch_add ------------------------------------------------------------
+
+inline __device__ int atomic_fetch_add(volatile int *dest, const int &val) {
+  return atomicAdd(const_cast<int *>(dest), val);
+}
+
+inline __device__ unsigned int atomic_fetch_add(volatile unsigned int *dest,
+                                                const unsigned int &val) {
+  return atomicAdd(const_cast<unsigned int *>(dest), val);
+}
+
+inline __device__ unsigned long long atomic_fetch_add(
+    volatile unsigned long long *dest, const unsigned long long &val) {
+  return atomicAdd(const_cast<unsigned long long *>(dest), val);
+}
+
+inline __device__ float atomic_fetch_add(volatile float *dest,
+                                         const float &val) {
+  return atomicAdd(const_cast<float *>(dest), val);
+}
+
+template <typename T>
+inline __device__ T
+atomic_fetch_add(volatile T *const dest,
+                 std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
+  // FIXME_HIP UB
+  union U {
+    int i;
+    T t;
+    __inline__ __device__ U() {}
+  } assume, oldval, newval;
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t + val;
+    oldval.i = atomicCAS(reinterpret_cast<int *>(const_cast<T *>(dest)),
+                         assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+template <typename T>
+inline __device__ T atomic_fetch_add(
+    volatile T *const dest,
+    std::enable_if_t<sizeof(T) == sizeof(long long), const T> val) {
+  // FIXME_HIP UB
+  union U {
+    unsigned long long i;
+    T t;
+    __inline__ __device__ U() {}
+  } assume, oldval, newval;
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t + val;
+    oldval.i = atomic_compare_exchange(
+        reinterpret_cast<volatile unsigned long long *>(dest), assume.i,
+        newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+__inline__ __device__ char atomic_fetch_add(volatile char *dest,
+                                            const char &val) {
+  unsigned int oldval, newval, assume;
+  // Note: read through dest itself (never &dest, which would reinterpret the
+  // pointer variable); this assumes dest addresses the low byte of an
+  // aligned 32-bit word.
+  oldval = *reinterpret_cast<volatile unsigned int *>(dest);
+
+  do {
+    assume = oldval;
+    // keep the upper three bytes and splice in the incremented low byte;
+    // parenthesized explicitly since '+' binds tighter than '&'
+    newval = (assume & 0xffffff00) | (((assume & 0xff) + val) & 0xff);
+    oldval =
+        atomicCAS(reinterpret_cast<unsigned int *>(const_cast<char *>(dest)),
+                  assume, newval);
+  } while (assume != oldval);
+
+  return oldval;
+}
+
+__inline__ __device__ short atomic_fetch_add(volatile short *dest,
+                                             const short &val) {
+  unsigned int oldval, newval, assume;
+  // As above: read through dest itself (not &dest); assumes a word-aligned
+  // 32-bit location whose low half-word is *dest.
+  oldval = *reinterpret_cast<volatile unsigned int *>(dest);
+
+  do {
+    assume = oldval;
+    // keep the upper half-word, splice in the incremented low half-word
+    // (explicit parentheses: '+' binds tighter than '&')
+    newval = (assume & 0xffff0000) | (((assume & 0xffff) + val) & 0xffff);
+    oldval =
+        atomicCAS(reinterpret_cast<unsigned int *>(const_cast<short *>(dest)),
+                  assume, newval);
+  } while (assume != oldval);
+
+  return oldval;
+}
+
+__inline__ __device__ long long atomic_fetch_add(volatile long long *dest,
+                                                 const long long &val) {
+  return atomicAdd(
+      reinterpret_cast<unsigned long long *>(const_cast<long long *>(dest)),
+      val);
+}
+
+template <class T>
+__inline__ __device__ T atomic_fetch_add(
+    volatile T *dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long long),
+                     const T &>
+        val) {
+  T return_val;
+  int done                 = 0;
+  unsigned int active      = __ballot(1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Kokkos::Impl::lock_address_hip_space((void *)dest)) {
+        return_val = *dest;
+        *dest      = return_val + val;
+        Kokkos::Impl::unlock_address_hip_space((void *)dest);
+        done = 1;
+      }
+    }
+    done_active = __ballot(done);
+  }
+  return return_val;
+}
+
+// atomic_fetch_sub ------------------------------------------------------------
+
+__inline__ __device__ int atomic_fetch_sub(volatile int *dest, int const &val) {
+  return atomicSub(const_cast<int *>(dest), val);
+}
+
+__inline__ __device__ unsigned int atomic_fetch_sub(volatile unsigned int *dest,
+                                                    unsigned int const &val) {
+  return atomicSub(const_cast<unsigned int *>(dest), val);
+}
+
+__inline__ __device__ unsigned long long atomic_fetch_sub(
+    unsigned long long *dest, int64_t const &val) {
+  return atomicAdd(reinterpret_cast<unsigned long long *>(dest),
+                   -reinterpret_cast<unsigned long long const &>(val));
+}
+
+__inline__ __device__ char atomic_fetch_sub(volatile char *dest,
+                                            const char &val) {
+  unsigned int oldval, newval, assume;
+  oldval = *reinterpret_cast<volatile unsigned int *>(dest);
+
+  do {
+    assume = oldval;
+    // keep the upper three bytes, splice in the decremented low byte
+    // (explicit parentheses: '+' binds tighter than '&')
+    newval = (assume & 0xffffff00) | (((assume & 0xff) - val) & 0xff);
+    oldval =
+        atomicCAS(reinterpret_cast<unsigned int *>(const_cast<char *>(dest)),
+                  assume, newval);
+  } while (assume != oldval);
+
+  return oldval;
+}
+
+__inline__ __device__ short atomic_fetch_sub(volatile short *dest,
+                                             const short &val) {
+  unsigned int oldval, newval, assume;
+  oldval = *reinterpret_cast<volatile unsigned int *>(dest);
+
+  do {
+    assume = oldval;
+    // keep the upper half-word, splice in the decremented low half-word
+    // (explicit parentheses: '+' binds tighter than '&')
+    newval = (assume & 0xffff0000) | (((assume & 0xffff) - val) & 0xffff);
+    oldval =
+        atomicCAS(reinterpret_cast<unsigned int *>(const_cast<short *>(dest)),
+                  assume, newval);
+  } while (assume != oldval);
+
+  return oldval;
+}
+
+__inline__ __device__ long long atomic_fetch_sub(volatile long long *dest,
+                                                 const long long &val) {
+  return static_cast<long long>(atomicAdd(
+      reinterpret_cast<unsigned long long int *>(const_cast<long long *>(dest)),
+      -reinterpret_cast<unsigned long long int const &>(val)));
+}
+
+template <class T>
+__inline__ __device__ T atomic_fetch_sub(
+    volatile T *dest, std::enable_if_t<sizeof(T) == sizeof(int), T> val) {
+  // FIXME_HIP UB
+  union U {
+    int i;
+    T t;
+    __inline__ __device__ U() {}
+  } assume, oldval, newval;
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t - val;
+    oldval.i = atomic_compare_exchange(reinterpret_cast<volatile int *>(dest),
+                                       assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+template <typename T>
+inline __device__ T atomic_fetch_sub(
+    volatile T *const dest,
+    std::enable_if_t<sizeof(T) == sizeof(long long), const T> val) {
+  // FIXME_HIP UB
+  union U {
+    unsigned long long i;
+    T t;
+    __inline__ __device__ U() {}
+  } assume, oldval, newval;
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t - val;
+    oldval.i = atomic_compare_exchange(
+        reinterpret_cast<volatile unsigned long long *>(dest), assume.i,
+        newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+template <class T>
+__inline__ __device__ T atomic_fetch_sub(
+    volatile T *dest, std::enable_if_t<sizeof(T) == sizeof(char), T> val) {
+  unsigned int oldval, newval, assume;
+  oldval = *reinterpret_cast<volatile unsigned int *>(dest);
+
+  do {
+    assume = oldval;
+    // keep the upper three bytes, splice in the decremented low byte
+    // (explicit parentheses: '+' binds tighter than '&')
+    newval = (assume & 0xffffff00) | (((assume & 0xff) - val) & 0xff);
+    oldval = atomicCAS(reinterpret_cast<unsigned int *>(const_cast<T *>(dest)),
+                       assume, newval);
+  } while (assume != oldval);
+
+  // the old byte value lives in the low byte of the word
+  return static_cast<T>(oldval & 0xff);
+}
+
+template <class T>
+__inline__ __device__ T atomic_fetch_sub(
+    volatile T *dest, std::enable_if_t<sizeof(T) == sizeof(short), T> val) {
+  unsigned int oldval, newval, assume;
+  // read through a volatile unsigned int, matching the CAS width below
+  oldval = *reinterpret_cast<volatile unsigned int *>(dest);
+
+  do {
+    assume = oldval;
+    // keep the upper half-word, splice in the decremented low half-word
+    // (explicit parentheses: '+' binds tighter than '&')
+    newval = (assume & 0xffff0000) | (((assume & 0xffff) - val) & 0xffff);
+    oldval = atomicCAS(reinterpret_cast<unsigned int *>(const_cast<T *>(dest)),
+                       assume, newval);
+  } while (assume != oldval);
+
+  // the old value lives in the low half-word
+  return static_cast<T>(oldval & 0xffff);
+}
+
+template <typename T>
+__inline__ __device__ T atomic_fetch_sub(
+    volatile T *const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long long),
+                     const T> &val) {
+  T return_val;
+  int done                 = 0;
+  unsigned int active      = __ballot(1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_hip_space((void *)dest)) {
+        return_val = *dest;
+        *dest      = return_val - val;
+        Impl::unlock_address_hip_space((void *)dest);
+        done = 1;
+      }
+    }
+    done_active = __ballot(done);
+  }
+  return return_val;
+}
+
+// atomic_fetch_or -------------------------------------------------------------
+
+__inline__ __device__ int atomic_fetch_or(volatile int *const dest,
+                                          int const val) {
+  return atomicOr(const_cast<int *>(dest), val);
+}
+
+__inline__ __device__ unsigned int atomic_fetch_or(
+    volatile unsigned int *const dest, unsigned int const val) {
+  return atomicOr(const_cast<unsigned int *>(dest), val);
+}
+
+__inline__ __device__ unsigned long long int atomic_fetch_or(
+    volatile unsigned long long int *const dest,
+    unsigned long long int const val) {
+  return atomicOr(const_cast<unsigned long long int *>(dest), val);
+}
+
+// atomic_fetch_and ------------------------------------------------------------
+
+__inline__ __device__ int atomic_fetch_and(volatile int *const dest,
+                                           int const val) {
+  return atomicAnd(const_cast<int *>(dest), val);
+}
+
+__inline__ __device__ unsigned int atomic_fetch_and(
+    volatile unsigned int *const dest, unsigned int const val) {
+  return atomicAnd(const_cast<unsigned int *>(dest), val);
+}
+
+__inline__ __device__ unsigned long long int atomic_fetch_and(
+    volatile unsigned long long int *const dest,
+    unsigned long long int const val) {
+  return atomicAnd(const_cast<unsigned long long int *>(dest), val);
+}
+
+namespace Impl {
+
+template <typename T>
+__inline__ __device__ void _atomic_store(T *ptr, T val,
+                                         memory_order_relaxed_t) {
+  (void)atomic_exchange(ptr, val);
+}
+
+template <typename T>
+__inline__ __device__ void _atomic_store(T *ptr, T val,
+                                         memory_order_seq_cst_t) {
+  memory_fence();
+  atomic_store(ptr, val, memory_order_relaxed);
+  memory_fence();
+}
+
+template <typename T>
+__inline__ __device__ void _atomic_store(T *ptr, T val,
+                                         memory_order_release_t) {
+  memory_fence();
+  atomic_store(ptr, val, memory_order_relaxed);
+}
+
+template <typename T>
+__inline__ __device__ void _atomic_store(T *ptr, T val) {
+  atomic_store(ptr, val, memory_order_relaxed);
+}
+
+template <typename T>
+__inline__ __device__ T _atomic_load(T *ptr, memory_order_relaxed_t) {
+  T dummy{};
+  return atomic_compare_exchange(ptr, dummy, dummy);
+}
+
+template <typename T>
+__inline__ __device__ T _atomic_load(T *ptr, memory_order_seq_cst_t) {
+  memory_fence();
+  T rv = atomic_load(ptr, memory_order_relaxed);
+  memory_fence();
+  return rv;
+}
+
+template <typename T>
+__inline__ __device__ T _atomic_load(T *ptr, memory_order_acquire_t) {
+  T rv = atomic_load(ptr, memory_order_relaxed);
+  memory_fence();
+  return rv;
+}
+
+template <typename T>
+__inline__ __device__ T _atomic_load(T *ptr) {
+  return atomic_load(ptr, memory_order_relaxed);
+}
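+
+// Editorial sketch (not part of the bundled sources): these helpers compose
+// into the usual release/acquire message-passing idiom, assuming the tag
+// instances (memory_order_release, ...) matching the tag types above:
+//
+//   producer:
+//     Impl::_atomic_store(&payload, 42, memory_order_relaxed);
+//     Impl::_atomic_store(&flag, 1, memory_order_release);  // fence, then store
+//   consumer:
+//     while (Impl::_atomic_load(&flag, memory_order_acquire) == 0) { }
+//     int v = Impl::_atomic_load(&payload, memory_order_relaxed);  // sees 42
+//
+// Note the relaxed load is itself a compare-exchange with a zero dummy: it
+// returns the current value atomically and only ever writes back the value
+// that was already there.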
+
+}  // namespace Impl
+}  // namespace Kokkos
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_BlockSize_Deduction.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_BlockSize_Deduction.hpp
new file mode 100644 (file)
index 0000000..87551ae
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_BLOCKSIZE_DEDUCTION_HPP
+#define KOKKOS_HIP_BLOCKSIZE_DEDUCTION_HPP
+
+#include <functional>
+#include <Kokkos_Macros.hpp>
+
+#if defined(__HIPCC__)
+
+#include <HIP/Kokkos_HIP_Instance.hpp>
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+enum class BlockType { Max, Preferred };
+
+template <typename DriverType, typename LaunchBounds = Kokkos::LaunchBounds<>,
+          HIPLaunchMechanism LaunchMechanism =
+              DeduceHIPLaunchMechanism<DriverType>::launch_mechanism>
+unsigned get_preferred_blocksize_impl() {
+  // FIXME_HIP - could be if constexpr for c++17
+  if (!HIPParallelLaunch<DriverType, LaunchBounds,
+                         LaunchMechanism>::default_launchbounds()) {
+    // use the user specified value
+    return LaunchBounds::maxTperB;
+  } else {
+    if (HIPParallelLaunch<DriverType, LaunchBounds,
+                          LaunchMechanism>::get_scratch_size() > 0) {
+      return HIPTraits::ConservativeThreadsPerBlock;
+    }
+    return HIPTraits::MaxThreadsPerBlock;
+  }
+}
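+
+// Editorial note (not in the bundled file): LaunchBounds carries the
+// Kokkos::LaunchBounds<MaxThreads, MinBlocks> template parameters, so e.g.
+//
+//   get_preferred_blocksize_impl<Driver, Kokkos::LaunchBounds<256, 4>>()
+//
+// takes the first branch and returns maxTperB == 256, while the default
+// Kokkos::LaunchBounds<> falls through to the 1024-or-256 heuristic above.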
+
+// FIXME_HIP - entire function could be constexpr for c++17
+template <typename DriverType, typename LaunchBounds = Kokkos::LaunchBounds<>,
+          HIPLaunchMechanism LaunchMechanism =
+              DeduceHIPLaunchMechanism<DriverType>::launch_mechanism>
+unsigned get_max_blocksize_impl() {
+  // FIXME_HIP - could be if constexpr for c++17
+  if (!HIPParallelLaunch<DriverType, LaunchBounds,
+                         LaunchMechanism>::default_launchbounds()) {
+    // use the user specified value
+    return LaunchBounds::maxTperB;
+  } else {
+    // we can always fit 1024-thread blocks if we only care about registers
+    // ... and don't mind spilling
+    return HIPTraits::MaxThreadsPerBlock;
+  }
+}
+
+// convenience method to select and return the proper function attributes
+// for a kernel, given the launch bounds et al.
+template <typename DriverType, typename LaunchBounds = Kokkos::LaunchBounds<>,
+          BlockType BlockSize = BlockType::Max,
+          HIPLaunchMechanism LaunchMechanism =
+              DeduceHIPLaunchMechanism<DriverType>::launch_mechanism>
+hipFuncAttributes get_hip_func_attributes_impl() {
+#ifndef KOKKOS_ENABLE_HIP_MULTIPLE_KERNEL_INSTANTIATIONS
+  return HIPParallelLaunch<DriverType, LaunchBounds,
+                           LaunchMechanism>::get_hip_func_attributes();
+#else
+  // FIXME_HIP - could be if constexpr for c++17
+  if (!HIPParallelLaunch<DriverType, LaunchBounds,
+                         LaunchMechanism>::default_launchbounds()) {
+    // for user defined, we *always* honor the request
+    return HIPParallelLaunch<DriverType, LaunchBounds,
+                             LaunchMechanism>::get_hip_func_attributes();
+  } else {
+    // FIXME_HIP - could be if constexpr for c++17
+    if (BlockSize == BlockType::Max) {
+      return HIPParallelLaunch<
+          DriverType, Kokkos::LaunchBounds<HIPTraits::MaxThreadsPerBlock, 1>,
+          LaunchMechanism>::get_hip_func_attributes();
+    } else {
+      const int blocksize =
+          get_preferred_blocksize_impl<DriverType, LaunchBounds,
+                                       LaunchMechanism>();
+      if (blocksize == HIPTraits::MaxThreadsPerBlock) {
+        return HIPParallelLaunch<
+            DriverType, Kokkos::LaunchBounds<HIPTraits::MaxThreadsPerBlock, 1>,
+            LaunchMechanism>::get_hip_func_attributes();
+      } else {
+        return HIPParallelLaunch<
+            DriverType,
+            Kokkos::LaunchBounds<HIPTraits::ConservativeThreadsPerBlock, 1>,
+            LaunchMechanism>::get_hip_func_attributes();
+      }
+    }
+  }
+#endif
+}
+
+// Given an initial block-size limitation based on register usage
+// determine the block size to select based on LDS limitation
+template <BlockType BlockSize, class DriverType, class LaunchBounds,
+          typename ShmemFunctor>
+unsigned hip_internal_get_block_size(const HIPInternal *hip_instance,
+                                     const ShmemFunctor &f,
+                                     const unsigned tperb_reg) {
+  // translate LB from CUDA to HIP
+  const unsigned min_waves_per_eu =
+      LaunchBounds::minBperSM ? LaunchBounds::minBperSM : 1;
+  const unsigned min_threads_per_sm = min_waves_per_eu * HIPTraits::WarpSize;
+  const unsigned shmem_per_sm       = hip_instance->m_shmemPerSM;
+  unsigned block_size               = tperb_reg;
+  do {
+    unsigned total_shmem = f(block_size);
+    // find how many threads we can fit with this blocksize based on LDS usage
+    unsigned tperb_shmem = total_shmem > shmem_per_sm ? 0 : block_size;
+
+    // FIXME_HIP - could be if constexpr for c++17
+    if (BlockSize == BlockType::Max) {
+      // we want the maximum blocksize possible
+      // just wait until we get a case where we can fit the LDS per SM
+      if (tperb_shmem) return block_size;
+    } else {
+      if (block_size == tperb_reg && tperb_shmem >= tperb_reg) {
+        // fast path for exit on first iteration if registers are more limiting
+        // than LDS usage, just use the register limited size
+        return tperb_reg;
+      }
+      // otherwise we need to apply a heuristic to choose the blocksize
+      // the current launchbound selection scheme is:
+      //      1. If no spills, choose 1024 [MaxThreadsPerBlock]
+      //      2. Otherwise, choose 256 [ConservativeThreadsPerBlock]
+      //
+      // For blocksizes between 256 and 1024, we'll be forced to use the 1024 LB
+      // and we'll already have pretty decent occupancy, thus dropping to 256
+      // *probably* isn't a concern
+      const unsigned blocks_per_cu_shmem = shmem_per_sm / total_shmem;
+      const unsigned tperb = tperb_shmem < tperb_reg ? tperb_shmem : tperb_reg;
+
+      // for anything running > 4 WFs that can also fit multiple blocks per
+      // CU, we're probably not occupancy limited, so just return this size
+      if (blocks_per_cu_shmem > 1 &&
+          tperb > HIPTraits::ConservativeThreadsPerBlock) {
+        return block_size;
+      }
+
+      // otherwise, it's probably better to drop to the first valid size that
+      // fits in the ConservativeThreadsPerBlock
+      if (tperb >= min_threads_per_sm) return block_size;
+    }
+    block_size >>= 1;
+  } while (block_size >= HIPTraits::WarpSize);
+  // TODO: return a negative, add an error to kernel launch
+  return 0;
+}
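+
+// Editorial walk-through (illustrative numbers, not from the bundled
+// sources): with shmem_per_sm = 64 KiB and an LDS functor f(b) = 48 * b
+// bytes, BlockType::Max starting from tperb_reg = 1024 returns 1024 on the
+// first pass (48 KiB fits). With f(b) = 96 * b instead, 1024 threads need
+// 96 KiB (too big, tperb_shmem = 0), so the loop halves to 512, whose 48 KiB
+// fits and is returned. The Preferred path layers the occupancy heuristic
+// above on top instead of taking the first fit.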
+
+// Standardized blocksize deduction for parallel constructs with no LDS usage
+// Returns the preferred blocksize as dictated by register usage
+//
+// Note: a returned block_size of zero indicates that the algorithm could not
+//       find a valid block size.  The caller is responsible for error handling.
+template <typename DriverType, typename LaunchBounds>
+unsigned hip_get_preferred_blocksize() {
+  return get_preferred_blocksize_impl<DriverType, LaunchBounds>();
+}
+
+// Standardized blocksize deduction for parallel constructs with no LDS usage
+// Returns the max blocksize as dictated by register usage
+//
+// Note: a returned block_size of zero indicates that the algorithm could not
+//       find a valid block size.  The caller is responsible for error handling.
+template <typename DriverType, typename LaunchBounds>
+unsigned hip_get_max_blocksize() {
+  return get_max_blocksize_impl<DriverType, LaunchBounds>();
+}
+
+// Standardized blocksize deduction for non-teams parallel constructs with LDS
+// usage. Returns the 'preferred' blocksize, as determined by the heuristics in
+// hip_internal_get_block_size
+//
+// The ShmemFunctor takes a single argument of the current blocksize under
+// consideration, and returns the LDS usage
+//
+// Note: a returned block_size of zero indicates that the algorithm could not
+//       find a valid block size.  The caller is responsible for error handling.
+template <typename DriverType, typename LaunchBounds, typename ShmemFunctor>
+unsigned hip_get_preferred_blocksize(HIPInternal const *hip_instance,
+                                     ShmemFunctor const &f) {
+  // get preferred blocksize limited by register usage
+  const unsigned tperb_reg =
+      hip_get_preferred_blocksize<DriverType, LaunchBounds>();
+  return hip_internal_get_block_size<BlockType::Preferred, DriverType,
+                                     LaunchBounds>(hip_instance, f, tperb_reg);
+}
+
+// Standardized blocksize deduction for teams-based parallel constructs with LDS
+// usage. Returns the 'preferred' blocksize, as determined by the heuristics in
+// hip_internal_get_block_size
+//
+// The ShmemTeamsFunctor takes two arguments: the hipFunctionAttributes and
+//  the current blocksize under consideration, and returns the LDS usage
+//
+// Note: a returned block_size of zero indicates that the algorithm could not
+//       find a valid block size.  The caller is responsible for error handling.
+template <typename DriverType, typename LaunchBounds,
+          typename ShmemTeamsFunctor>
+unsigned hip_get_preferred_team_blocksize(HIPInternal const *hip_instance,
+                                          ShmemTeamsFunctor const &f) {
+  hipFuncAttributes attr =
+      get_hip_func_attributes_impl<DriverType, LaunchBounds,
+                                   BlockType::Preferred>();
+  // get preferred blocksize limited by register usage
+  const unsigned tperb_reg =
+      hip_get_preferred_blocksize<DriverType, LaunchBounds>();
+  return hip_internal_get_block_size<BlockType::Preferred, DriverType,
+                                     LaunchBounds>(
+      hip_instance, std::bind(f, attr, std::placeholders::_1), tperb_reg);
+}
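+
+// Editorial sketch (hypothetical functor, not part of the bundled sources):
+// a ShmemTeamsFunctor is expected to have the shape
+//
+//   auto shmem_teams = [&](hipFuncAttributes const &attr, unsigned block_size) {
+//     return unsigned(attr.sharedSizeBytes) + per_team_lds_bytes(block_size);
+//   };
+//
+// which the std::bind above curries down to the single-argument ShmemFunctor
+// consumed by hip_internal_get_block_size.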
+
+// Standardized blocksize deduction for non-teams parallel constructs with LDS
+// usage. Returns the maximum possible blocksize, as determined by the heuristics
+// in hip_internal_get_block_size
+//
+// The ShmemFunctor takes a single argument of the current blocksize under
+// consideration, and returns the LDS usage
+//
+// Note: a returned block_size of zero indicates that the algorithm could not
+//       find a valid block size.  The caller is responsible for error handling.
+template <typename DriverType, typename LaunchBounds, typename ShmemFunctor>
+unsigned hip_get_max_blocksize(HIPInternal const *hip_instance,
+                               ShmemFunctor const &f) {
+  // get max blocksize limited by register usage
+  const unsigned tperb_reg = hip_get_max_blocksize<DriverType, LaunchBounds>();
+  return hip_internal_get_block_size<BlockType::Max, DriverType, LaunchBounds>(
+      hip_instance, f, tperb_reg);
+}
+
+// Standardized blocksize deduction for teams-based parallel constructs with LDS
+// usage. Returns the maximum possible blocksize, as determined by the heuristics
+// in hip_internal_get_block_size
+//
+// The ShmemTeamsFunctor takes two arguments: the hipFunctionAttributes and
+//  the current blocksize under consideration, and returns the LDS usage
+//
+// Note: a returned block_size of zero indicates that the algorithm could not
+//       find a valid block size.  The caller is responsible for error handling.
+template <typename DriverType, typename LaunchBounds,
+          typename ShmemTeamsFunctor>
+unsigned hip_get_max_team_blocksize(HIPInternal const *hip_instance,
+                                    ShmemTeamsFunctor const &f) {
+  hipFuncAttributes attr =
+      get_hip_func_attributes_impl<DriverType, LaunchBounds, BlockType::Max>();
+  // get max blocksize
+  const unsigned tperb_reg = hip_get_max_blocksize<DriverType, LaunchBounds>();
+  return hip_internal_get_block_size<BlockType::Max, DriverType, LaunchBounds>(
+      hip_instance, std::bind(f, attr, std::placeholders::_1), tperb_reg);
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Error.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Error.hpp
new file mode 100644 (file)
index 0000000..a75e7a4
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_ERROR_HPP
+#define KOKKOS_HIP_ERROR_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+#include <hip/hip_runtime.h>
+
+#include <ostream>
+
+namespace Kokkos {
+namespace Impl {
+
+void hip_internal_error_throw(hipError_t e, const char* name,
+                              const char* file = nullptr, const int line = 0);
+
+inline void hip_internal_safe_call(hipError_t e, const char* name,
+                                   const char* file = nullptr,
+                                   const int line   = 0) {
+  if (hipSuccess != e) {
+    hip_internal_error_throw(e, name, file, line);
+  }
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+
+KOKKOS_DEPRECATED
+inline void hip_internal_safe_call_deprecated(hipError_t e, const char* name,
+                                              const char* file = nullptr,
+                                              const int line   = 0) {
+  hip_internal_safe_call(e, name, file, line);
+}
+
+#endif
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#define KOKKOS_IMPL_HIP_SAFE_CALL(call) \
+  Kokkos::Impl::hip_internal_safe_call(call, #call, __FILE__, __LINE__)
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+#define HIP_SAFE_CALL(call)                                              \
+  Kokkos::Impl::hip_internal_safe_call_deprecated(call, #call, __FILE__, \
+                                                  __LINE__)
+
+#endif
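+
+// Editorial usage note (not in the bundled file): the macro stringizes the
+// call and forwards file/line into the error message, e.g.
+//
+//   void *ptr = nullptr;
+//   KOKKOS_IMPL_HIP_SAFE_CALL(hipMalloc(&ptr, 1024));
+//   KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(ptr));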
+
+namespace Kokkos {
+namespace Experimental {
+
+class HIPRawMemoryAllocationFailure : public RawMemoryAllocationFailure {
+ private:
+  hipError_t m_error_code = hipSuccess;
+
+  static FailureMode get_failure_mode(hipError_t error_code) {
+    switch (error_code) {
+      case hipErrorMemoryAllocation: return FailureMode::OutOfMemoryError;
+      case hipErrorInvalidValue: return FailureMode::InvalidAllocationSize;
+      default: return FailureMode::Unknown;
+    }
+  }
+
+ public:
+  HIPRawMemoryAllocationFailure(size_t arg_attempted_size,
+                                hipError_t arg_error_code,
+                                AllocationMechanism arg_mechanism) noexcept
+      : RawMemoryAllocationFailure(
+            arg_attempted_size, /* HIPSpace doesn't handle alignment? */ 1,
+            get_failure_mode(arg_error_code), arg_mechanism),
+        m_error_code(arg_error_code) {}
+
+  void append_additional_error_information(std::ostream& o) const override {
+    if (m_error_code != hipSuccess) {
+      o << "  The HIP allocation returned the error code \"\""
+        << hipGetErrorName(m_error_code) << "\".";
+    }
+  }
+};
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Half_Conversion.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Half_Conversion.hpp
new file mode 100644 (file)
index 0000000..d04fe20
--- /dev/null
@@ -0,0 +1,248 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_HALF_HPP_
+#define KOKKOS_HIP_HALF_HPP_
+
+#ifdef KOKKOS_IMPL_HALF_TYPE_DEFINED
+
+#include <Kokkos_Half.hpp>
+#include <Kokkos_NumericTraits.hpp>  // reduction_identity
+
+namespace Kokkos {
+namespace Experimental {
+
+/************************** half conversions **********************************/
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(half_t val) { return val; }
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(float val) { return half_t(__float2half(val)); }
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(bool val) { return cast_to_half(static_cast<float>(val)); }
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(double val) {
+  return half_t(__float2half(static_cast<float>(val)));
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(short val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  return half_t(__short2half_rn(val));
+#else
+  return half_t(__float2half(static_cast<float>(val)));
+#endif
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned short val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  return half_t(__ushort2half_rn(val));
+#else
+  return half_t(__float2half(static_cast<float>(val)));
+#endif
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(int val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  return half_t(__int2half_rn(val));
+#else
+  return half_t(__float2half(static_cast<float>(val)));
+#endif
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned int val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  return half_t(__uint2half_rn(val));
+#else
+  return half_t(__float2half(static_cast<float>(val)));
+#endif
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long long val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  return half_t(__ll2half_rn(val));
+#else
+  return half_t(__float2half(static_cast<float>(val)));
+#endif
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long long val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  return half_t(__ull2half_rn(val));
+#else
+  return half_t(__float2half(static_cast<float>(val)));
+#endif
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long val) {
+  return cast_to_half(static_cast<long long>(val));
+}
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long val) {
+  return cast_to_half(static_cast<unsigned long long>(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, float>::value, T>
+cast_from_half(half_t val) {
+  return __half2float(half_t::impl_type(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, bool>::value, T>
+cast_from_half(half_t val) {
+  return static_cast<T>(cast_from_half<float>(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, double>::value, T>
+cast_from_half(half_t val) {
+  return static_cast<T>(__half2float(half_t::impl_type(val)));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, short>::value, T>
+cast_from_half(half_t val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  return __half2short_rz(half_t::impl_type(val));
+#else
+  return static_cast<T>(__half2float(half_t::impl_type(val)));
+#endif
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned short>::value, T>
+    cast_from_half(half_t val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  return __half2ushort_rz(half_t::impl_type(val));
+#else
+  return static_cast<T>(__half2float(half_t::impl_type(val)));
+#endif
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, int>::value, T>
+cast_from_half(half_t val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  return __half2int_rz(half_t::impl_type(val));
+#else
+  return static_cast<T>(__half2float(half_t::impl_type(val)));
+#endif
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, unsigned>::value, T>
+cast_from_half(half_t val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  return __half2uint_rz(half_t::impl_type(val));
+#else
+  return static_cast<T>(__half2float(half_t::impl_type(val)));
+#endif
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long long>::value, T>
+cast_from_half(half_t val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  return __half2ll_rz(half_t::impl_type(val));
+#else
+  return static_cast<T>(__half2float(half_t::impl_type(val)));
+#endif
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long long>::value, T>
+    cast_from_half(half_t val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  return __half2ull_rz(half_t::impl_type(val));
+#else
+  return static_cast<T>(__half2float(half_t::impl_type(val)));
+#endif
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long>::value, T>
+cast_from_half(half_t val) {
+  return static_cast<T>(cast_from_half<long long>(val));
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long>::value, T>
+    cast_from_half(half_t val) {
+  return static_cast<T>(cast_from_half<unsigned long long>(val));
+}
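+
+// Editorial usage sketch (not part of the bundled sources): the two families
+// round-trip through half_t, e.g.
+//
+//   using namespace Kokkos::Experimental;
+//   half_t h = cast_to_half(3.5f);
+//   float  f = cast_from_half<float>(h);  // 3.5f, exactly representable
+//   int    i = cast_from_half<int>(h);    // 3, the _rz intrinsics truncate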
+}  // namespace Experimental
+
+// use float as the return type for these identities since hip_fp16.h
+// has no constexpr functions for casting to __half
+template <>
+struct reduction_identity<Kokkos::Experimental::half_t> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float sum() noexcept {
+    return 0.0F;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float prod() noexcept {
+    return 1.0F;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float max() noexcept {
+    return -65504.0F;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float min() noexcept {
+    return 65504.0F;
+  }
+};
+
+}  // namespace Kokkos
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Half_Impl_Type.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Half_Impl_Type.hpp
new file mode 100644 (file)
index 0000000..728a229
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_HALF_IMPL_TYPE_HPP_
+#define KOKKOS_HIP_HALF_IMPL_TYPE_HPP_
+
+#include <hip/hip_fp16.h>
+
+#ifndef KOKKOS_IMPL_HALF_TYPE_DEFINED
+// Make sure no one else tries to define half_t
+#define KOKKOS_IMPL_HALF_TYPE_DEFINED
+#define KOKKOS_IMPL_HIP_HALF_TYPE_DEFINED
+
+namespace Kokkos {
+namespace Impl {
+struct half_impl_t {
+  using type = __half;
+};
+}  // namespace Impl
+}  // namespace Kokkos
+#endif  // KOKKOS_IMPL_HALF_TYPE_DEFINED
+#endif  // KOKKOS_HIP_HALF_IMPL_TYPE_HPP_
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Instance.cpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Instance.cpp
new file mode 100644 (file)
index 0000000..3785cfe
--- /dev/null
@@ -0,0 +1,569 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/*--------------------------------------------------------------------------*/
+/* Kokkos interfaces */
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+
+#include <HIP/Kokkos_HIP_Instance.hpp>
+#include <Kokkos_HIP.hpp>
+#include <Kokkos_HIP_Space.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+/*--------------------------------------------------------------------------*/
+/* Standard 'C' libraries */
+#include <stdlib.h>
+
+/* Standard 'C++' libraries */
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
+__device__ __constant__ unsigned long kokkos_impl_hip_constant_memory_buffer
+    [Kokkos::Experimental::Impl::HIPTraits::ConstantMemoryUsage /
+     sizeof(unsigned long)];
+#endif
+
+namespace Kokkos {
+namespace Impl {
+Kokkos::View<uint32_t *, Kokkos::Experimental::HIPSpace>
+hip_global_unique_token_locks(bool deallocate) {
+  static Kokkos::View<uint32_t *, Kokkos::Experimental::HIPSpace> locks =
+      Kokkos::View<uint32_t *, Kokkos::Experimental::HIPSpace>();
+  if (!deallocate && locks.extent(0) == 0)
+    locks = Kokkos::View<uint32_t *, Kokkos::Experimental::HIPSpace>(
+        "Kokkos::UniqueToken<HIP>::m_locks",
+        Kokkos::Experimental::HIP().concurrency());
+  if (deallocate)
+    locks = Kokkos::View<uint32_t *, Kokkos::Experimental::HIPSpace>();
+  return locks;
+}
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Experimental {
+namespace {
+class HIPInternalDevices {
+ public:
+  enum { MAXIMUM_DEVICE_COUNT = 64 };
+  struct hipDeviceProp_t m_hipProp[MAXIMUM_DEVICE_COUNT];
+  int m_hipDevCount;
+
+  HIPInternalDevices();
+
+  static HIPInternalDevices const &singleton();
+};
+
+HIPInternalDevices::HIPInternalDevices() {
+  KOKKOS_IMPL_HIP_SAFE_CALL(hipGetDeviceCount(&m_hipDevCount));
+
+  if (m_hipDevCount > MAXIMUM_DEVICE_COUNT) {
+    Kokkos::abort(
+        "Sorry, you have more GPUs per node than we thought anybody would ever "
+        "have. Please report this to github.com/kokkos/kokkos.");
+  }
+  for (int i = 0; i < m_hipDevCount; ++i) {
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipGetDeviceProperties(m_hipProp + i, i));
+  }
+}
+
+const HIPInternalDevices &HIPInternalDevices::singleton() {
+  static HIPInternalDevices self;
+  return self;
+}
+}  // namespace
+
+unsigned long *Impl::HIPInternal::constantMemHostStaging = nullptr;
+hipEvent_t Impl::HIPInternal::constantMemReusable        = nullptr;
+std::mutex Impl::HIPInternal::constantMemMutex;
+
+namespace Impl {
+
+//----------------------------------------------------------------------------
+
+void HIPInternal::print_configuration(std::ostream &s) const {
+  const HIPInternalDevices &dev_info = HIPInternalDevices::singleton();
+
+  s << "macro  KOKKOS_ENABLE_HIP : defined" << '\n';
+#if defined(HIP_VERSION)
+  s << "macro  HIP_VERSION = " << HIP_VERSION << " = version "
+    << HIP_VERSION_MAJOR << '.' << HIP_VERSION_MINOR << '.' << HIP_VERSION_PATCH
+    << '\n';
+#endif
+
+  for (int i = 0; i < dev_info.m_hipDevCount; ++i) {
+    s << "Kokkos::Experimental::HIP[ " << i << " ] "
+      << dev_info.m_hipProp[i].name << " version "
+      << (dev_info.m_hipProp[i].major) << "." << dev_info.m_hipProp[i].minor
+      << ", Total Global Memory: "
+      << ::Kokkos::Impl::human_memory_size(dev_info.m_hipProp[i].totalGlobalMem)
+      << ", Shared Memory per Block: "
+      << ::Kokkos::Impl::human_memory_size(
+             dev_info.m_hipProp[i].sharedMemPerBlock);
+    if (m_hipDev == i) s << " : Selected";
+    s << '\n';
+  }
+}
+
+//----------------------------------------------------------------------------
+
+HIPInternal::~HIPInternal() {
+  if (m_scratchSpace || m_scratchFlags) {
+    std::cerr << "Kokkos::Experimental::HIP ERROR: Failed to call "
+                 "Kokkos::Experimental::HIP::finalize()"
+              << std::endl;
+    std::cerr.flush();
+  }
+
+  m_hipDev            = -1;
+  m_hipArch           = -1;
+  m_multiProcCount    = 0;
+  m_maxWarpCount      = 0;
+  m_maxSharedWords    = 0;
+  m_maxShmemPerBlock  = 0;
+  m_scratchSpaceCount = 0;
+  m_scratchFlagsCount = 0;
+  m_scratchSpace      = nullptr;
+  m_scratchFlags      = nullptr;
+  m_stream            = nullptr;
+}
+
+int HIPInternal::verify_is_initialized(const char *const label) const {
+  if (m_hipDev < 0) {
+    Kokkos::abort((std::string("Kokkos::Experimental::HIP::") + label +
+                   " : ERROR device not initialized\n")
+                      .c_str());
+  }
+  return 0 <= m_hipDev;
+}
+
+uint32_t HIPInternal::impl_get_instance_id() const noexcept {
+  return m_instance_id;
+}
+HIPInternal &HIPInternal::singleton() {
+  static HIPInternal *self = nullptr;
+  if (!self) {
+    self = new HIPInternal();
+  }
+  return *self;
+}
+
+void HIPInternal::fence() const {
+  fence("Kokkos::HIPInternal::fence: Unnamed Internal Fence");
+}
+void HIPInternal::fence(const std::string &name) const {
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<
+      Kokkos::Experimental::HIP>(
+      name,
+      Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{
+          impl_get_instance_id()},
+      [&]() {
+        KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamSynchronize(m_stream));
+        // can reset our cycle id now as well
+        m_cycleId = 0;
+      });
+}
+
+void HIPInternal::initialize(int hip_device_id, hipStream_t stream,
+                             bool manage_stream) {
+  if (was_finalized)
+    Kokkos::abort("Calling HIP::initialize after HIP::finalize is illegal\n");
+
+  if (is_initialized()) return;
+
+  int constexpr WordSize = sizeof(size_type);
+
+  if (!HostSpace::execution_space::impl_is_initialized()) {
+    const std::string msg(
+        "HIP::initialize ERROR : HostSpace::execution_space "
+        "is not initialized");
+    Kokkos::Impl::throw_runtime_exception(msg);
+  }
+
+  const HIPInternalDevices &dev_info = HIPInternalDevices::singleton();
+
+  const bool ok_init = nullptr == m_scratchSpace || nullptr == m_scratchFlags;
+
+  // Need at least a GPU device
+  const bool ok_id =
+      0 <= hip_device_id && hip_device_id < dev_info.m_hipDevCount;
+
+  if (ok_init && ok_id) {
+    const struct hipDeviceProp_t &hipProp = dev_info.m_hipProp[hip_device_id];
+
+    m_hipDev     = hip_device_id;
+    m_deviceProp = hipProp;
+
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipSetDevice(m_hipDev));
+
+    m_stream                    = stream;
+    m_manage_stream             = manage_stream;
+    m_team_scratch_current_size = 0;
+    m_team_scratch_ptr          = nullptr;
+
+    // number of multiprocessors
+    m_multiProcCount = hipProp.multiProcessorCount;
+
+    //----------------------------------
+    // Maximum number of warps, capped at WarpSize so that a single warp
+    // (one lane per warp) can perform the final cross-warp reduction.
+    m_maxWarpCount = hipProp.maxThreadsPerBlock / Impl::HIPTraits::WarpSize;
+    if (HIPTraits::WarpSize < m_maxWarpCount) {
+      m_maxWarpCount = Impl::HIPTraits::WarpSize;
+    }
+    m_maxSharedWords = hipProp.sharedMemPerBlock / WordSize;
+
+    //----------------------------------
+    // Maximum number of blocks
+    m_maxBlock[0] = hipProp.maxGridSize[0];
+    m_maxBlock[1] = hipProp.maxGridSize[1];
+    m_maxBlock[2] = hipProp.maxGridSize[2];
+
+    // theoretically, we can get 40 WFs / CU, but can only sustain 32
+    // see
+    // https://github.com/ROCm-Developer-Tools/HIP/blob/a0b5dfd625d99af7e288629747b40dd057183173/vdi/hip_platform.cpp#L742
+    m_maxWavesPerCU = 32;
+    // FIXME_HIP - Nick to implement this upstream
+    //             Register count comes from Sec. 2.2. "Data Sharing" of the
+    //             Vega 7nm ISA document (see the diagram)
+    //             https://developer.amd.com/wp-content/resources/Vega_7nm_Shader_ISA.pdf
+    //             VGPRS = 4 (SIMD/CU) * 256 VGPR/SIMD * 64 registers / VGPR =
+    //             65536 VGPR/CU
+    m_regsPerSM        = 65536;
+    m_shmemPerSM       = hipProp.maxSharedMemoryPerMultiProcessor;
+    m_maxShmemPerBlock = hipProp.sharedMemPerBlock;
+    m_maxThreadsPerSM  = m_maxWavesPerCU * HIPTraits::WarpSize;
+    //----------------------------------
+    // Multiblock reduction uses scratch flags for counters
+    // and scratch space for partial reduction values.
+    // Allocate some initial space.  This will grow as needed.
+    {
+      const unsigned reduce_block_count =
+          m_maxWarpCount * Impl::HIPTraits::WarpSize;
+
+      (void)scratch_flags(reduce_block_count * 2 * sizeof(size_type));
+      (void)scratch_space(reduce_block_count * 16 * sizeof(size_type));
+    }
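+    // Editorial note (derived from the code above, assuming
+    // maxThreadsPerBlock = 1024, WarpSize = 64, and a 4-byte size_type):
+    // m_maxWarpCount = 1024 / 64 = 16, reduce_block_count = 16 * 64 = 1024,
+    // so the initial allocations are 1024 * 2 * 4 = 8 KiB of flags and
+    // 1024 * 16 * 4 = 64 KiB of partial-reduction space.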
+    //----------------------------------
+    // Concurrent bitset for obtaining unique tokens from within
+    // an executing kernel.
+    {
+      const int32_t buffer_bound =
+          Kokkos::Impl::concurrent_bitset::buffer_bound(HIP::concurrency());
+
+      // Allocate and initialize uint32_t[ buffer_bound ]
+
+      using Record =
+          Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::HIPSpace,
+                                               void>;
+
+      Record *const r = Record::allocate(Kokkos::Experimental::HIPSpace(),
+                                         "Kokkos::InternalScratchBitset",
+                                         sizeof(uint32_t) * buffer_bound);
+
+      Record::increment(r);
+    }
+    //----------------------------------
+
+  } else {
+    std::ostringstream msg;
+    msg << "Kokkos::Experimental::HIP::initialize(" << hip_device_id
+        << ") FAILED";
+
+    if (!ok_init) {
+      msg << " : Already initialized";
+    }
+    if (!ok_id) {
+      msg << " : Device identifier out of range "
+          << "[0.." << dev_info.m_hipDevCount - 1 << "]";
+    }
+    Kokkos::Impl::throw_runtime_exception(msg.str());
+  }
+
+  // Init the lock array used for arbitrarily sized atomics
+  if (m_stream == nullptr) ::Kokkos::Impl::initialize_host_hip_lock_arrays();
+
+  // Allocate a staging buffer for constant mem in pinned host memory
+  // and an event to avoid overwriting data still in use by previous kernel
+  // launches
+  if (m_stream == nullptr) {
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipHostMalloc((void **)&constantMemHostStaging,
+                                            HIPTraits::ConstantMemoryUsage));
+
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipEventCreate(&constantMemReusable));
+  }
+
+  KOKKOS_IMPL_HIP_SAFE_CALL(
+      hipMalloc(&m_scratch_locks, sizeof(int32_t) * HIP::concurrency()));
+  KOKKOS_IMPL_HIP_SAFE_CALL(
+      hipMemset(m_scratch_locks, 0, sizeof(int32_t) * HIP::concurrency()));
+}
+
+//----------------------------------------------------------------------------
+
+using ScratchGrain =
+    Kokkos::Experimental::HIP::size_type[Impl::HIPTraits::WarpSize];
+enum { sizeScratchGrain = sizeof(ScratchGrain) };
+
+Kokkos::Experimental::HIP::size_type *HIPInternal::scratch_space(
+    const std::size_t size) {
+  if (verify_is_initialized("scratch_space") &&
+      m_scratchSpaceCount * sizeScratchGrain < size) {
+    m_scratchSpaceCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
+
+    using Record =
+        Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::HIPSpace,
+                                             void>;
+
+    if (m_scratchSpace) Record::decrement(Record::get_record(m_scratchSpace));
+
+    Record *const r = Record::allocate(
+        Kokkos::Experimental::HIPSpace(), "Kokkos::InternalScratchSpace",
+        (sizeScratchGrain * m_scratchSpaceCount));
+
+    Record::increment(r);
+
+    m_scratchSpace = reinterpret_cast<size_type *>(r->data());
+  }
+
+  return m_scratchSpace;
+}
+
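+// Editorial note (not in the bundled file): sizeScratchGrain is
+// WarpSize * sizeof(size_type) bytes, i.e. 64 * 4 = 256 with a 4-byte
+// size_type, so a request of e.g. 1000 bytes rounds up to
+// (1000 + 255) / 256 = 4 grains = 1024 bytes; smaller requests leave the
+// existing allocation untouched, so scratch only ever grows.
+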
+Kokkos::Experimental::HIP::size_type *HIPInternal::scratch_flags(
+    const std::size_t size) {
+  if (verify_is_initialized("scratch_flags") &&
+      m_scratchFlagsCount * sizeScratchGrain < size) {
+    m_scratchFlagsCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
+
+    using Record =
+        Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::HIPSpace,
+                                             void>;
+
+    if (m_scratchFlags) Record::decrement(Record::get_record(m_scratchFlags));
+
+    Record *const r = Record::allocate(
+        Kokkos::Experimental::HIPSpace(), "Kokkos::InternalScratchFlags",
+        (sizeScratchGrain * m_scratchFlagsCount));
+
+    Record::increment(r);
+
+    m_scratchFlags = reinterpret_cast<size_type *>(r->data());
+
+    KOKKOS_IMPL_HIP_SAFE_CALL(
+        hipMemset(m_scratchFlags, 0, m_scratchFlagsCount * sizeScratchGrain));
+  }
+
+  return m_scratchFlags;
+}
+
+void *HIPInternal::resize_team_scratch_space(std::int64_t bytes,
+                                             bool force_shrink) {
+  if (m_team_scratch_current_size == 0) {
+    m_team_scratch_current_size = bytes;
+    m_team_scratch_ptr = Kokkos::kokkos_malloc<Kokkos::Experimental::HIPSpace>(
+        "Kokkos::HIPSpace::TeamScratchMemory", m_team_scratch_current_size);
+  }
+  if ((bytes > m_team_scratch_current_size) ||
+      ((bytes < m_team_scratch_current_size) && (force_shrink))) {
+    m_team_scratch_current_size = bytes;
+    m_team_scratch_ptr = Kokkos::kokkos_realloc<Kokkos::Experimental::HIPSpace>(
+        m_team_scratch_ptr, m_team_scratch_current_size);
+  }
+  return m_team_scratch_ptr;
+}
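+// The team scratch buffer only grows on demand: a request smaller than the
+// current allocation returns the existing pointer unless force_shrink is
+// set. Illustrative call sequence (hypothetical sizes):
+//
+//   // resize_team_scratch_space(1 << 20);        allocates 1 MiB
+//   // resize_team_scratch_space(1 << 10);        keeps the 1 MiB buffer
+//   // resize_team_scratch_space(1 << 10, true);  shrinks to 1 KiB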
+
+//----------------------------------------------------------------------------
+
+void HIPInternal::finalize() {
+  this->fence("Kokkos::HIPInternal::finalize: fence on finalization");
+  was_finalized = true;
+
+  if (this == &singleton()) {
+    (void)Kokkos::Impl::hip_global_unique_token_locks(true);
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipHostFree(constantMemHostStaging));
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipEventDestroy(constantMemReusable));
+  }
+
+  if (nullptr != m_scratchSpace || nullptr != m_scratchFlags) {
+    using RecordHIP =
+        Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::HIPSpace>;
+
+    RecordHIP::decrement(RecordHIP::get_record(m_scratchFlags));
+    RecordHIP::decrement(RecordHIP::get_record(m_scratchSpace));
+
+    if (m_team_scratch_current_size > 0)
+      Kokkos::kokkos_free<Kokkos::Experimental::HIPSpace>(m_team_scratch_ptr);
+
+    if (m_manage_stream && m_stream != nullptr)
+      KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamDestroy(m_stream));
+  }
+
+  m_hipDev                    = -1;
+  m_hipArch                   = -1;
+  m_multiProcCount            = 0;
+  m_maxWarpCount              = 0;
+  m_maxBlock                  = {0, 0, 0};
+  m_maxSharedWords            = 0;
+  m_maxShmemPerBlock          = 0;
+  m_scratchSpaceCount         = 0;
+  m_scratchFlagsCount         = 0;
+  m_scratchSpace              = nullptr;
+  m_scratchFlags              = nullptr;
+  m_stream                    = nullptr;
+  m_team_scratch_current_size = 0;
+  m_team_scratch_ptr          = nullptr;
+
+  KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(m_scratch_locks));
+  m_scratch_locks = nullptr;
+
+  if (nullptr != d_driverWorkArray) {
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipHostFree(d_driverWorkArray));
+    d_driverWorkArray = nullptr;
+  }
+}
+
+char *HIPInternal::get_next_driver(size_t driverTypeSize) const {
+  if (d_driverWorkArray == nullptr) {
+    KOKKOS_IMPL_HIP_SAFE_CALL(
+        hipHostMalloc(&d_driverWorkArray,
+                      m_maxDriverCycles * m_maxDriverTypeSize * sizeof(char),
+                      hipHostMallocNonCoherent));
+  }
+  if (driverTypeSize > m_maxDriverTypeSize) {
+    // fence handles the cycle id reset for us
+    fence(
+        "Kokkos::HIPInternal::get_next_driver: fence before reallocating "
+        "resources");
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipHostFree(d_driverWorkArray));
+    m_maxDriverTypeSize = driverTypeSize;
+    if (m_maxDriverTypeSize % 128 != 0)
+      m_maxDriverTypeSize =
+          m_maxDriverTypeSize + 128 - m_maxDriverTypeSize % 128;
+    KOKKOS_IMPL_HIP_SAFE_CALL(
+        hipHostMalloc(&d_driverWorkArray,
+                      m_maxDriverCycles * m_maxDriverTypeSize * sizeof(char),
+                      hipHostMallocNonCoherent));
+  } else {
+    m_cycleId = (m_cycleId + 1) % m_maxDriverCycles;
+    if (m_cycleId == 0) {
+      // ensure any outstanding kernels are completed before we wrap around
+      fence(
+          "Kokkos::HIPInternal::get_next_driver: fence before reusing first "
+          "driver");
+    }
+  }
+  return &d_driverWorkArray[m_maxDriverTypeSize * m_cycleId];
+}
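+// get_next_driver() hands out slots from a ring of m_maxDriverCycles
+// host-pinned slots of m_maxDriverTypeSize bytes each, so that many kernel
+// launches can be in flight before a fence is needed. A sketch of the slot
+// arithmetic, using the defaults declared in the header (100 slots, 10 KiB):
+//
+//   // slot i starts at d_driverWorkArray + i * m_maxDriverTypeSize
+//   // wrapping from slot 99 back to slot 0 fences first, so the driver
+//   // data of the oldest launch is never overwritten while still in use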
+
+//----------------------------------------------------------------------------
+
+Kokkos::Experimental::HIP::size_type hip_internal_multiprocessor_count() {
+  return HIPInternal::singleton().m_multiProcCount;
+}
+
+Kokkos::Experimental::HIP::size_type hip_internal_maximum_warp_count() {
+  return HIPInternal::singleton().m_maxWarpCount;
+}
+
+std::array<Kokkos::Experimental::HIP::size_type, 3>
+hip_internal_maximum_grid_count() {
+  return HIPInternal::singleton().m_maxBlock;
+}
+
+Kokkos::Experimental::HIP::size_type *hip_internal_scratch_space(
+    const HIP &instance, const std::size_t size) {
+  return instance.impl_internal_space_instance()->scratch_space(size);
+}
+
+Kokkos::Experimental::HIP::size_type *hip_internal_scratch_flags(
+    const HIP &instance, const std::size_t size) {
+  return instance.impl_internal_space_instance()->scratch_flags(size);
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+void hip_device_synchronize(const std::string &name) {
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<
+      Kokkos::Experimental::HIP>(
+      name,
+      Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+          GlobalDeviceSynchronization,
+      [&]() { KOKKOS_IMPL_HIP_SAFE_CALL(hipDeviceSynchronize()); });
+}
+
+void hip_internal_error_throw(hipError_t e, const char *name, const char *file,
+                              const int line) {
+  std::ostringstream out;
+  out << name << " error( " << hipGetErrorName(e)
+      << "): " << hipGetErrorString(e);
+  if (file) {
+    out << " " << file << ":" << line;
+  }
+  throw_runtime_exception(out.str());
+}
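+// hip_internal_error_throw is the failure path behind the safe-call wrapper
+// used throughout this backend. A minimal sketch of such a wrapper, assuming
+// the actual definition in Kokkos_HIP_Error.hpp has the same shape
+// (HIP_SAFE_CALL_SKETCH is a hypothetical name for illustration only):
+//
+//   #define HIP_SAFE_CALL_SKETCH(call)                                \
+//     do {                                                            \
+//       hipError_t e = (call);                                        \
+//       if (e != hipSuccess)                                          \
+//         Kokkos::Impl::hip_internal_error_throw(e, #call, __FILE__,  \
+//                                                __LINE__);           \
+//     } while (0)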
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Experimental {
+HIP::size_type HIP::detect_device_count() {
+  return HIPInternalDevices::singleton().m_hipDevCount;
+}
+}  // namespace Experimental
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Instance.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Instance.hpp
new file mode 100644 (file)
index 0000000..d8f2658
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/*--------------------------------------------------------------------------*/
+
+#ifndef KOKKOS_HIP_INSTANCE_HPP
+#define KOKKOS_HIP_INSTANCE_HPP
+
+#include <Kokkos_HIP_Space.hpp>
+#include <HIP/Kokkos_HIP_Error.hpp>
+
+#include <mutex>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+struct HIPTraits {
+  static int constexpr WarpSize       = 64;
+  static int constexpr WarpIndexMask  = 0x003f; /* hexadecimal for 63 */
+  static int constexpr WarpIndexShift = 6;      /* WarpSize == 1 << WarpIndexShift */
+  static int constexpr ConservativeThreadsPerBlock =
+      256;  // conservative fallback blocksize in case of spills
+  static int constexpr MaxThreadsPerBlock =
+      1024;  // the maximum we can fit in a block
+  static int constexpr ConstantMemoryUsage        = 0x008000; /* 32k bytes */
+  static int constexpr KernelArgumentLimit        = 0x001000; /*  4k bytes */
+  static int constexpr ConstantMemoryUseThreshold = 0x000200; /* 512 bytes */
+};
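+
+// WarpIndexMask and WarpIndexShift let the thread-to-warp mapping be done
+// with bit operations instead of division. Illustrative use for a flat
+// thread index tid:
+//
+//   unsigned const lane = tid & HIPTraits::WarpIndexMask;   // tid % 64
+//   unsigned const warp = tid >> HIPTraits::WarpIndexShift; // tid / 64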
+
+//----------------------------------------------------------------------------
+
+HIP::size_type hip_internal_maximum_warp_count();
+std::array<HIP::size_type, 3> hip_internal_maximum_grid_count();
+HIP::size_type hip_internal_multiprocessor_count();
+
+HIP::size_type *hip_internal_scratch_space(const HIP &instance,
+                                           const std::size_t size);
+HIP::size_type *hip_internal_scratch_flags(const HIP &instance,
+                                           const std::size_t size);
+
+//----------------------------------------------------------------------------
+
+class HIPInternal {
+ private:
+  HIPInternal(const HIPInternal &);
+  HIPInternal &operator=(const HIPInternal &);
+
+ public:
+  using size_type = ::Kokkos::Experimental::HIP::size_type;
+
+  int m_hipDev                        = -1;
+  int m_hipArch                       = -1;
+  unsigned m_multiProcCount           = 0;
+  unsigned m_maxWarpCount             = 0;
+  std::array<size_type, 3> m_maxBlock = {0, 0, 0};
+  unsigned m_maxWavesPerCU            = 0;
+  unsigned m_maxSharedWords           = 0;
+  int m_regsPerSM;
+  int m_shmemPerSM       = 0;
+  int m_maxShmemPerBlock = 0;
+  int m_maxThreadsPerSM  = 0;
+
+  // array of DriverTypes to be allocated in host-pinned memory for async
+  // kernel launches
+  mutable char *d_driverWorkArray = nullptr;
+  // number of kernel launches that can be in-flight w/o synchronization
+  const int m_maxDriverCycles = 100;
+  // max size of a DriverType [bytes]
+  mutable size_t m_maxDriverTypeSize = 1024 * 10;
+  // the current index in the driverWorkArray
+  mutable int m_cycleId = 0;
+  // mutex to access d_driverWorkArray
+  mutable std::mutex m_mutexWorkArray;
+  // mutex to access shared memory
+  mutable std::mutex m_mutexSharedMemory;
+
+  // Scratch Spaces for Reductions
+  std::size_t m_scratchSpaceCount = 0;
+  std::size_t m_scratchFlagsCount = 0;
+
+  size_type *m_scratchSpace = nullptr;
+  size_type *m_scratchFlags = nullptr;
+
+  hipDeviceProp_t m_deviceProp;
+
+  hipStream_t m_stream   = nullptr;
+  uint32_t m_instance_id = Kokkos::Tools::Experimental::Impl::idForInstance<
+      Kokkos::Experimental::HIP>(reinterpret_cast<uintptr_t>(this));
+  bool m_manage_stream = false;
+
+  // Team Scratch Level 1 Space
+  mutable int64_t m_team_scratch_current_size = 0;
+  mutable void *m_team_scratch_ptr            = nullptr;
+  mutable std::mutex m_team_scratch_mutex;
+  std::int32_t *m_scratch_locks;
+
+  bool was_finalized = false;
+
+  // FIXME_HIP: these should be per-device, not per-stream; the use of
+  // 'static' here will break once there are multiple devices
+  static unsigned long *constantMemHostStaging;
+  static hipEvent_t constantMemReusable;
+  static std::mutex constantMemMutex;
+
+  static HIPInternal &singleton();
+
+  int verify_is_initialized(const char *const label) const;
+
+  int is_initialized() const { return m_hipDev >= 0; }
+
+  void initialize(int hip_device_id, hipStream_t stream = nullptr,
+                  bool manage_stream = false);
+  void finalize();
+
+  void print_configuration(std::ostream &) const;
+
+  void fence() const;
+  void fence(const std::string &) const;
+
+  // returns the next driver type pointer in our work array
+  char *get_next_driver(size_t driverTypeSize) const;
+
+  ~HIPInternal();
+
+  HIPInternal() = default;
+
+  // Resizing of reduction related scratch spaces
+  size_type *scratch_space(const std::size_t size);
+  size_type *scratch_flags(const std::size_t size);
+  uint32_t impl_get_instance_id() const noexcept;
+  // Resizing of team level 1 scratch
+  void *resize_team_scratch_space(std::int64_t bytes,
+                                  bool force_shrink = false);
+};
+
+}  // namespace Impl
+
+// Partitioning an Execution Space: expects space and integer arguments for
+// relative weight
+//   Customization point for backends
+//   Default behavior is to return the passed in instance
+
+namespace Impl {
+inline void create_HIP_instances(std::vector<HIP> &instances) {
+  for (int s = 0; s < int(instances.size()); s++) {
+    hipStream_t stream;
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamCreate(&stream));
+    instances[s] = HIP(stream, true);
+  }
+}
+}  // namespace Impl
+
+template <class... Args>
+std::vector<HIP> partition_space(const HIP &, Args...) {
+#ifdef __cpp_fold_expressions
+  static_assert(
+      (... && std::is_arithmetic_v<Args>),
+      "Kokkos Error: partitioning arguments must be integers or floats");
+#endif
+
+  std::vector<HIP> instances(sizeof...(Args));
+  Impl::create_HIP_instances(instances);
+  return instances;
+}
+
+template <class T>
+std::vector<HIP> partition_space(const HIP &, std::vector<T> &weights) {
+  static_assert(
+      std::is_arithmetic<T>::value,
+      "Kokkos Error: partitioning arguments must be integers or floats");
+
+  std::vector<HIP> instances(weights.size());
+  Impl::create_HIP_instances(instances);
+  return instances;
+}
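+
+// Illustrative use of the overloads above. The weights are accepted for API
+// compatibility only; this backend creates one new stream-backed instance
+// per requested partition regardless of the weights:
+//
+//   // HIP hip_exec;
+//   // auto parts = partition_space(hip_exec, 1, 1);
+//   // parts.size() == 2, each part owning its own hipStream_t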
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_KernelLaunch.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_KernelLaunch.hpp
new file mode 100644 (file)
index 0000000..70b979e
--- /dev/null
@@ -0,0 +1,555 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_KERNEL_LAUNCH_HPP
+#define KOKKOS_HIP_KERNEL_LAUNCH_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#if defined(__HIPCC__)
+
+#include <HIP/Kokkos_HIP_Error.hpp>
+#include <HIP/Kokkos_HIP_Instance.hpp>
+#include <Kokkos_HIP_Space.hpp>
+#include <HIP/Kokkos_HIP_Locks.hpp>
+
+// Must use a global variable on the device with HIP-Clang
+#ifdef __HIP__
+#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
+__device__ __constant__ extern unsigned long
+    kokkos_impl_hip_constant_memory_buffer[];
+#else
+__device__ __constant__ unsigned long kokkos_impl_hip_constant_memory_buffer
+    [Kokkos::Experimental::Impl::HIPTraits::ConstantMemoryUsage /
+     sizeof(unsigned long)];
+#endif
+#endif
+
+namespace Kokkos {
+namespace Experimental {
+template <typename T>
+inline __device__ T *kokkos_impl_hip_shared_memory() {
+  extern __shared__ Kokkos::Experimental::HIPSpace::size_type sh[];
+  return (T *)sh;
+}
+}  // namespace Experimental
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+// The hip_parallel_launch_*_memory code is identical to the CUDA code
+template <typename DriverType>
+__global__ static void hip_parallel_launch_constant_memory() {
+  const DriverType &driver = *(reinterpret_cast<const DriverType *>(
+      kokkos_impl_hip_constant_memory_buffer));
+
+  driver();
+}
+
+template <typename DriverType, unsigned int maxTperB, unsigned int minBperSM>
+__global__ __launch_bounds__(
+    maxTperB, minBperSM) static void hip_parallel_launch_constant_memory() {
+  const DriverType &driver = *(reinterpret_cast<const DriverType *>(
+      kokkos_impl_hip_constant_memory_buffer));
+
+  driver();
+}
+
+template <class DriverType>
+__global__ static void hip_parallel_launch_local_memory(
+    const DriverType *driver) {
+  // FIXME_HIP: driver should be passed by copy rather than by pointer
+  driver->operator()();
+}
+
+template <class DriverType, unsigned int maxTperB, unsigned int minBperSM>
+__global__ __launch_bounds__(
+    maxTperB,
+    minBperSM) static void hip_parallel_launch_local_memory(const DriverType
+                                                                *driver) {
+  // FIXME_HIP: driver should be passed by copy rather than by pointer
+  driver->operator()();
+}
+
+template <typename DriverType>
+__global__ static void hip_parallel_launch_global_memory(
+    const DriverType *driver) {
+  driver->operator()();
+}
+
+template <typename DriverType, unsigned int maxTperB, unsigned int minBperSM>
+__global__ __launch_bounds__(
+    maxTperB,
+    minBperSM) static void hip_parallel_launch_global_memory(const DriverType
+                                                                 *driver) {
+  driver->operator()();
+}
+
+enum class HIPLaunchMechanism : unsigned {
+  Default        = 0,
+  ConstantMemory = 1,
+  GlobalMemory   = 2,
+  LocalMemory    = 4
+};
+
+constexpr inline HIPLaunchMechanism operator|(HIPLaunchMechanism p1,
+                                              HIPLaunchMechanism p2) {
+  return static_cast<HIPLaunchMechanism>(static_cast<unsigned>(p1) |
+                                         static_cast<unsigned>(p2));
+}
+constexpr inline HIPLaunchMechanism operator&(HIPLaunchMechanism p1,
+                                              HIPLaunchMechanism p2) {
+  return static_cast<HIPLaunchMechanism>(static_cast<unsigned>(p1) &
+                                         static_cast<unsigned>(p2));
+}
+
+template <HIPLaunchMechanism l>
+struct HIPDispatchProperties {
+  HIPLaunchMechanism launch_mechanism = l;
+};
+
+// Use local memory up to ConstantMemoryUseThreshold, global memory above
+// ConstantMemoryUsage, and constant memory in between.
+// The following code is identical to the CUDA code.
+template <typename DriverType>
+struct DeduceHIPLaunchMechanism {
+  static constexpr Kokkos::Experimental::WorkItemProperty::HintLightWeight_t
+      light_weight = Kokkos::Experimental::WorkItemProperty::HintLightWeight;
+  static constexpr Kokkos::Experimental::WorkItemProperty::HintHeavyWeight_t
+      heavy_weight = Kokkos::Experimental::WorkItemProperty::HintHeavyWeight;
+  static constexpr typename DriverType::Policy::work_item_property property =
+      typename DriverType::Policy::work_item_property();
+
+  static constexpr HIPLaunchMechanism valid_launch_mechanism =
+      // BuildValidMask
+      (sizeof(DriverType) < HIPTraits::KernelArgumentLimit
+           ? HIPLaunchMechanism::LocalMemory
+           : HIPLaunchMechanism::Default) |
+      (sizeof(DriverType) < HIPTraits::ConstantMemoryUsage
+           ? HIPLaunchMechanism::ConstantMemory
+           : HIPLaunchMechanism::Default) |
+      HIPLaunchMechanism::GlobalMemory;
+
+  static constexpr HIPLaunchMechanism requested_launch_mechanism =
+      (((property & light_weight) == light_weight)
+           ? HIPLaunchMechanism::LocalMemory
+           : HIPLaunchMechanism::ConstantMemory) |
+      HIPLaunchMechanism::GlobalMemory;
+
+  static constexpr HIPLaunchMechanism default_launch_mechanism =
+      // BuildValidMask
+      (sizeof(DriverType) < HIPTraits::ConstantMemoryUseThreshold)
+          ? HIPLaunchMechanism::LocalMemory
+          : ((sizeof(DriverType) < HIPTraits::ConstantMemoryUsage)
+                 ? HIPLaunchMechanism::ConstantMemory
+                 : HIPLaunchMechanism::GlobalMemory);
+
+  // Decision table; F = sizeof(DriverType), UseT = ConstantMemoryUseThreshold,
+  // KAL = KernelArgumentLimit, CMU = ConstantMemoryUsage;
+  // L/C/G = Local/Constant/Global memory:
+  //              None                LightWeight    HeavyWeight
+  // F<UseT       LCG  LCG L  L       LCG  LG L  L   LCG  CG L  C
+  // UseT<F<KAL   LCG  LCG C  C       LCG  LG C  L   LCG  CG C  C
+  // KAL<F<CMU     CG  LCG C  C        CG  LG C  G    CG  CG C  C
+  // CMU<F          G  LCG G  G         G  LG G  G     G  CG G  G
+  static constexpr HIPLaunchMechanism launch_mechanism =
+      ((property & light_weight) == light_weight)
+          ? (sizeof(DriverType) < HIPTraits::KernelArgumentLimit
+                 ? HIPLaunchMechanism::LocalMemory
+                 : HIPLaunchMechanism::GlobalMemory)
+          : (((property & heavy_weight) == heavy_weight)
+                 ? (sizeof(DriverType) < HIPTraits::ConstantMemoryUsage
+                        ? HIPLaunchMechanism::ConstantMemory
+                        : HIPLaunchMechanism::GlobalMemory)
+                 : (default_launch_mechanism));
+};
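+
+// Illustrative outcomes of the deduction above for a functor without a
+// work-item property hint, using the HIPTraits limits (512 B threshold,
+// 4 kB argument limit, 32 kB constant memory):
+//
+//   sizeof(DriverType) ==   256 -> LocalMemory    (below 512 B)
+//   sizeof(DriverType) ==  2048 -> ConstantMemory (between 512 B and 32 kB)
+//   sizeof(DriverType) == 65536 -> GlobalMemory   (above 32 kB)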
+
+template <typename DriverType, typename LaunchBounds,
+          HIPLaunchMechanism LaunchMechanism>
+struct HIPParallelLaunchKernelFuncData {
+  static unsigned int get_scratch_size(
+      hipFuncAttributes const &hip_func_attributes) {
+    return hip_func_attributes.localSizeBytes;
+  }
+
+  static hipFuncAttributes get_hip_func_attributes(void const *kernel_func) {
+    static hipFuncAttributes attr = [=]() {
+      hipFuncAttributes attr;
+      KOKKOS_IMPL_HIP_SAFE_CALL(hipFuncGetAttributes(&attr, kernel_func));
+      return attr;
+    }();
+    return attr;
+  }
+};
+
+//---------------------------------------------------------------//
+// HIPParallelLaunchKernelFunc structure and its specializations //
+//---------------------------------------------------------------//
+template <typename DriverType, typename LaunchBounds,
+          HIPLaunchMechanism LaunchMechanism>
+struct HIPParallelLaunchKernelFunc;
+
+// HIPLaunchMechanism::LocalMemory specializations
+template <typename DriverType, unsigned int MaxThreadsPerBlock,
+          unsigned int MinBlocksPerSM>
+struct HIPParallelLaunchKernelFunc<
+    DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+    HIPLaunchMechanism::LocalMemory> {
+  using funcdata_t = HIPParallelLaunchKernelFuncData<
+      DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+      HIPLaunchMechanism::LocalMemory>;
+  static auto get_kernel_func() {
+    return hip_parallel_launch_local_memory<DriverType, MaxThreadsPerBlock,
+                                            MinBlocksPerSM>;
+  }
+
+  static constexpr auto default_launchbounds() { return false; }
+
+  static auto get_scratch_size() {
+    return funcdata_t::get_scratch_size(get_hip_func_attributes());
+  }
+
+  static hipFuncAttributes get_hip_func_attributes() {
+    return funcdata_t::get_hip_func_attributes(
+        reinterpret_cast<void const *>(get_kernel_func()));
+  }
+};
+
+template <typename DriverType>
+struct HIPParallelLaunchKernelFunc<DriverType, Kokkos::LaunchBounds<0, 0>,
+                                   HIPLaunchMechanism::LocalMemory> {
+  using funcdata_t =
+      HIPParallelLaunchKernelFuncData<DriverType, Kokkos::LaunchBounds<0, 0>,
+                                      HIPLaunchMechanism::LocalMemory>;
+  static auto get_kernel_func() {
+    return HIPParallelLaunchKernelFunc<
+        DriverType, Kokkos::LaunchBounds<HIPTraits::MaxThreadsPerBlock, 1>,
+        HIPLaunchMechanism::LocalMemory>::get_kernel_func();
+  }
+
+  static constexpr auto default_launchbounds() { return true; }
+
+  static auto get_scratch_size() {
+    return funcdata_t::get_scratch_size(get_hip_func_attributes());
+  }
+
+  static hipFuncAttributes get_hip_func_attributes() {
+    return funcdata_t::get_hip_func_attributes(
+        reinterpret_cast<void const *>(get_kernel_func()));
+  }
+};
+
+// HIPLaunchMechanism::GlobalMemory specializations
+template <typename DriverType, unsigned int MaxThreadsPerBlock,
+          unsigned int MinBlocksPerSM>
+struct HIPParallelLaunchKernelFunc<
+    DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+    HIPLaunchMechanism::GlobalMemory> {
+  using funcdata_t = HIPParallelLaunchKernelFuncData<
+      DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+      HIPLaunchMechanism::GlobalMemory>;
+  static auto get_kernel_func() {
+    return hip_parallel_launch_global_memory<DriverType, MaxThreadsPerBlock,
+                                             MinBlocksPerSM>;
+  }
+
+  static constexpr auto default_launchbounds() { return false; }
+
+  static auto get_scratch_size() {
+    return funcdata_t::get_scratch_size(get_hip_func_attributes());
+  }
+
+  static hipFuncAttributes get_hip_func_attributes() {
+    return funcdata_t::get_hip_func_attributes(
+        reinterpret_cast<void const *>(get_kernel_func()));
+  }
+};
+
+template <typename DriverType>
+struct HIPParallelLaunchKernelFunc<DriverType, Kokkos::LaunchBounds<0, 0>,
+                                   HIPLaunchMechanism::GlobalMemory> {
+  using funcdata_t =
+      HIPParallelLaunchKernelFuncData<DriverType, Kokkos::LaunchBounds<0, 0>,
+                                      HIPLaunchMechanism::GlobalMemory>;
+  static auto get_kernel_func() {
+    return hip_parallel_launch_global_memory<DriverType>;
+  }
+
+  static constexpr auto default_launchbounds() { return true; }
+
+  static auto get_scratch_size() {
+    return funcdata_t::get_scratch_size(get_hip_func_attributes());
+  }
+
+  static hipFuncAttributes get_hip_func_attributes() {
+    return funcdata_t::get_hip_func_attributes(
+        reinterpret_cast<void const *>(get_kernel_func()));
+  }
+};
+
+// HIPLaunchMechanism::ConstantMemory specializations
+template <typename DriverType, unsigned int MaxThreadsPerBlock,
+          unsigned int MinBlocksPerSM>
+struct HIPParallelLaunchKernelFunc<
+    DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+    HIPLaunchMechanism::ConstantMemory> {
+  using funcdata_t = HIPParallelLaunchKernelFuncData<
+      DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+      HIPLaunchMechanism::ConstantMemory>;
+  static auto get_kernel_func() {
+    return hip_parallel_launch_constant_memory<DriverType, MaxThreadsPerBlock,
+                                               MinBlocksPerSM>;
+  }
+
+  static constexpr auto default_launchbounds() { return false; }
+
+  static auto get_scratch_size() {
+    return funcdata_t::get_scratch_size(get_hip_func_attributes());
+  }
+
+  static hipFuncAttributes get_hip_func_attributes() {
+    return funcdata_t::get_hip_func_attributes(
+        reinterpret_cast<void const *>(get_kernel_func()));
+  }
+};
+
+template <typename DriverType>
+struct HIPParallelLaunchKernelFunc<DriverType, Kokkos::LaunchBounds<0, 0>,
+                                   HIPLaunchMechanism::ConstantMemory> {
+  using funcdata_t =
+      HIPParallelLaunchKernelFuncData<DriverType, Kokkos::LaunchBounds<0, 0>,
+                                      HIPLaunchMechanism::ConstantMemory>;
+  static auto get_kernel_func() {
+    return hip_parallel_launch_constant_memory<DriverType>;
+  }
+  static constexpr auto default_launchbounds() { return true; }
+
+  static auto get_scratch_size() {
+    return funcdata_t::get_scratch_size(get_hip_func_attributes());
+  }
+
+  static hipFuncAttributes get_hip_func_attributes() {
+    return funcdata_t::get_hip_func_attributes(
+        reinterpret_cast<void const *>(get_kernel_func()));
+  }
+};
+
+//------------------------------------------------------------------//
+// HIPParallelLaunchKernelInvoker structure and its specializations //
+//------------------------------------------------------------------//
+template <typename DriverType, typename LaunchBounds,
+          HIPLaunchMechanism LaunchMechanism>
+struct HIPParallelLaunchKernelInvoker;
+
+// HIPLaunchMechanism::LocalMemory specialization
+template <typename DriverType, typename LaunchBounds>
+struct HIPParallelLaunchKernelInvoker<DriverType, LaunchBounds,
+                                      HIPLaunchMechanism::LocalMemory>
+    : HIPParallelLaunchKernelFunc<DriverType, LaunchBounds,
+                                  HIPLaunchMechanism::LocalMemory> {
+  using base_t = HIPParallelLaunchKernelFunc<DriverType, LaunchBounds,
+                                             HIPLaunchMechanism::LocalMemory>;
+
+  static void invoke_kernel(DriverType const *driver, dim3 const &grid,
+                            dim3 const &block, int shmem,
+                            HIPInternal const *hip_instance) {
+    (base_t::get_kernel_func())<<<grid, block, shmem, hip_instance->m_stream>>>(
+        driver);
+  }
+};
+
+// HIPLaunchMechanism::GlobalMemory specialization
+template <typename DriverType, typename LaunchBounds>
+struct HIPParallelLaunchKernelInvoker<DriverType, LaunchBounds,
+                                      HIPLaunchMechanism::GlobalMemory>
+    : HIPParallelLaunchKernelFunc<DriverType, LaunchBounds,
+                                  HIPLaunchMechanism::GlobalMemory> {
+  using base_t = HIPParallelLaunchKernelFunc<DriverType, LaunchBounds,
+                                             HIPLaunchMechanism::GlobalMemory>;
+
+  // FIXME_HIP: the code differs from the CUDA version because the driver
+  // cannot be passed by copy
+  static void invoke_kernel(DriverType const *driver, dim3 const &grid,
+                            dim3 const &block, int shmem,
+                            HIPInternal const *hip_instance) {
+    (base_t::get_kernel_func())<<<grid, block, shmem, hip_instance->m_stream>>>(
+        driver);
+  }
+};
+
+// HIPLaunchMechanism::ConstantMemory specializations
+template <typename DriverType, typename LaunchBounds>
+struct HIPParallelLaunchKernelInvoker<DriverType, LaunchBounds,
+                                      HIPLaunchMechanism::ConstantMemory>
+    : HIPParallelLaunchKernelFunc<DriverType, LaunchBounds,
+                                  HIPLaunchMechanism::ConstantMemory> {
+  using base_t =
+      HIPParallelLaunchKernelFunc<DriverType, LaunchBounds,
+                                  HIPLaunchMechanism::ConstantMemory>;
+  static_assert(sizeof(DriverType) < HIPTraits::ConstantMemoryUsage,
+                "Kokkos Error: Requested HIPLaunchConstantMemory with a "
+                "Functor larger than 32kB.");
+
+  static void invoke_kernel(DriverType const *driver, dim3 const &grid,
+                            dim3 const &block, int shmem,
+                            HIPInternal const *hip_instance) {
+    // Wait until the previous kernel that uses the constant buffer is done
+    std::lock_guard<std::mutex> lock(HIPInternal::constantMemMutex);
+    KOKKOS_IMPL_HIP_SAFE_CALL(
+        hipEventSynchronize(HIPInternal::constantMemReusable));
+
+    // Copy functor (synchronously) to staging buffer in pinned host memory
+    unsigned long *staging = hip_instance->constantMemHostStaging;
+    std::memcpy((void *)staging, (void *)driver, sizeof(DriverType));
+
+    // Copy functor asynchronously from there to constant memory on the device
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipMemcpyToSymbolAsync(
+        HIP_SYMBOL(kokkos_impl_hip_constant_memory_buffer), staging,
+        sizeof(DriverType), 0, hipMemcpyHostToDevice, hip_instance->m_stream));
+
+    // Invoke the driver function on the device
+    (base_t::
+         get_kernel_func())<<<grid, block, shmem, hip_instance->m_stream>>>();
+
+    // Record an event that says when the constant buffer can be reused
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipEventRecord(HIPInternal::constantMemReusable,
+                                             hip_instance->m_stream));
+  }
+};
+
+//-----------------------------//
+// HIPParallelLaunch structure //
+//-----------------------------//
+template <typename DriverType, typename LaunchBounds = Kokkos::LaunchBounds<>,
+          HIPLaunchMechanism LaunchMechanism =
+              DeduceHIPLaunchMechanism<DriverType>::launch_mechanism>
+struct HIPParallelLaunch;
+
+template <typename DriverType, unsigned int MaxThreadsPerBlock,
+          unsigned int MinBlocksPerSM, HIPLaunchMechanism LaunchMechanism>
+struct HIPParallelLaunch<
+    DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+    LaunchMechanism>
+    : HIPParallelLaunchKernelInvoker<
+          DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+          LaunchMechanism> {
+  using base_t = HIPParallelLaunchKernelInvoker<
+      DriverType, Kokkos::LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>,
+      LaunchMechanism>;
+
+  HIPParallelLaunch(const DriverType &driver, const dim3 &grid,
+                    const dim3 &block, const int shmem,
+                    const HIPInternal *hip_instance,
+                    const bool /*prefer_shmem*/) {
+    if ((grid.x != 0) && ((block.x * block.y * block.z) != 0)) {
+      if (hip_instance->m_maxShmemPerBlock < shmem) {
+        Kokkos::Impl::throw_runtime_exception(
+            "HIPParallelLaunch FAILED: shared memory request is too large");
+      }
+
+      KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE();
+
+      std::lock_guard<std::mutex> const lock(hip_instance->m_mutexWorkArray);
+
+      // Invoke the driver function on the device
+      DriverType *d_driver = reinterpret_cast<DriverType *>(
+          hip_instance->get_next_driver(sizeof(DriverType)));
+      std::memcpy((void *)d_driver, (void *)&driver, sizeof(DriverType));
+      base_t::invoke_kernel(d_driver, grid, block, shmem, hip_instance);
+
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+      KOKKOS_IMPL_HIP_SAFE_CALL(hipGetLastError());
+      hip_instance->fence(
+          "Kokkos::Experimental::Impl::HIParallelLaunch: Debug Only Check for "
+          "Execution Error");
+#endif
+    }
+  }
+};
+
+// Convenience method to launch the correct kernel given the launch bounds,
+// launch mechanism, etc.
+template <typename DriverType, typename LaunchBounds = Kokkos::LaunchBounds<>,
+          HIPLaunchMechanism LaunchMechanism =
+              DeduceHIPLaunchMechanism<DriverType>::launch_mechanism>
+void hip_parallel_launch(const DriverType &driver, const dim3 &grid,
+                         const dim3 &block, const int shmem,
+                         const HIPInternal *hip_instance,
+                         const bool prefer_shmem) {
+#ifndef KOKKOS_ENABLE_HIP_MULTIPLE_KERNEL_INSTANTIATIONS
+  HIPParallelLaunch<DriverType, LaunchBounds, LaunchMechanism>(
+      driver, grid, block, shmem, hip_instance, prefer_shmem);
+#else
+  // FIXME_HIP - could be 'if constexpr' with C++17
+  if (!HIPParallelLaunch<DriverType, LaunchBounds,
+                         LaunchMechanism>::default_launchbounds()) {
+    // for user defined, we *always* honor the request
+    HIPParallelLaunch<DriverType, LaunchBounds, LaunchMechanism>(
+        driver, grid, block, shmem, hip_instance, prefer_shmem);
+  } else {
+    // we can do what we like
+    const unsigned flat_block_size = block.x * block.y * block.z;
+    if (flat_block_size <= HIPTraits::ConservativeThreadsPerBlock) {
+      // we have to use the large blocksize
+      HIPParallelLaunch<
+          DriverType,
+          Kokkos::LaunchBounds<HIPTraits::ConservativeThreadsPerBlock, 1>,
+          LaunchMechanism>(driver, grid, block, shmem, hip_instance,
+                           prefer_shmem);
+    } else {
+      HIPParallelLaunch<DriverType,
+                        Kokkos::LaunchBounds<HIPTraits::MaxThreadsPerBlock, 1>,
+                        LaunchMechanism>(driver, grid, block, shmem,
+                                         hip_instance, prefer_shmem);
+    }
+  }
+#endif
+}
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Locks.cpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Locks.cpp
new file mode 100644 (file)
index 0000000..f1ffaf3
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <HIP/Kokkos_HIP_Locks.hpp>
+#include <HIP/Kokkos_HIP_Error.hpp>
+#include <Kokkos_HIP_Space.hpp>
+
+#include <hip/hip_runtime.h>
+
+#include <iostream>
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
+namespace Impl {
+__device__ __constant__ HIPLockArrays g_device_hip_lock_arrays = {nullptr, 0};
+}
+#endif
+
+namespace {
+
+__global__ void init_lock_array_kernel_atomic() {
+  unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
+  if (i < KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK + 1) {
+    Kokkos::Impl::g_device_hip_lock_arrays.atomic[i] = 0;
+  }
+}
+
+}  // namespace
+
+namespace Impl {
+
+HIPLockArrays g_host_hip_lock_arrays = {nullptr, 0};
+
+void initialize_host_hip_lock_arrays() {
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+  desul::Impl::init_lock_arrays();
+
+  DESUL_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE();
+#endif
+
+  if (g_host_hip_lock_arrays.atomic != nullptr) return;
+  KOKKOS_IMPL_HIP_SAFE_CALL(hipMalloc(
+      &g_host_hip_lock_arrays.atomic,
+      sizeof(std::int32_t) * (KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK + 1)));
+
+  g_host_hip_lock_arrays.n = ::Kokkos::Experimental::HIP::concurrency();
+
+  KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE();
+  init_lock_array_kernel_atomic<<<
+      (KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK + 1 + 255) / 256, 256, 0, nullptr>>>();
+}
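+// The launch above covers every lock entry with exactly one thread; with
+// KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK == 0x1FFFF (131071):
+//
+//   entries = MASK + 1              = 131072
+//   blocks  = (entries + 255) / 256 = 512
+//   blocks * 256                    = 131072   (one thread per entry)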
+
+void finalize_host_hip_lock_arrays() {
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+  desul::Impl::finalize_lock_arrays();
+#endif
+
+  if (g_host_hip_lock_arrays.atomic == nullptr) return;
+  KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(g_host_hip_lock_arrays.atomic));
+  g_host_hip_lock_arrays.atomic = nullptr;
+  g_host_hip_lock_arrays.n      = 0;
+#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
+  KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE();
+#endif
+}
+
+}  // namespace Impl
+
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Locks.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Locks.hpp
new file mode 100644 (file)
index 0000000..c72616d
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_LOCKS_HPP
+#define KOKKOS_HIP_LOCKS_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <cstdint>
+
+#include <HIP/Kokkos_HIP_Error.hpp>
+
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+#include <desul/atomics/Lock_Array_HIP.hpp>
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+struct HIPLockArrays {
+  std::int32_t* atomic;
+  std::int32_t n;
+};
+
+/// \brief This global variable in Host space is the central definition
+///        of these arrays.
+extern HIPLockArrays g_host_hip_lock_arrays;
+
+/// \brief After this call, the g_host_hip_lock_arrays variable has
+///        valid, initialized arrays.
+///
+/// This call is idempotent.
+void initialize_host_hip_lock_arrays();
+
+/// \brief After this call, the g_host_hip_lock_arrays variable has
+///        all null pointers, and all array memory has been freed.
+///
+/// This call is idempotent.
+void finalize_host_hip_lock_arrays();
+
+#if defined(__HIPCC__)
+
+/// \brief This global variable in HIP space is what kernels use
+///        to get access to the lock arrays.
+///
+/// When relocatable device code is enabled, there can be one single
+/// instance of this global variable for the entire executable,
+/// whose definition will be in Kokkos_HIP_Locks.cpp (and whose declaration
+/// here must then be extern).
+/// This one instance will be initialized by initialize_host_hip_lock_arrays
+/// and need not be modified afterwards.
+///
+/// When relocatable device code is disabled, an instance of this variable
+/// will be created in every translation unit that sees this header file.
+/// Since the Kokkos_HIP_Locks.cpp translation unit cannot initialize the
+/// instances in other translation units, we must update this HIP global
+/// variable based on the Host global variable prior to running any kernels
+/// that will use it.
+/// That is the purpose of the KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE macro.
+__device__
+#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
+    __constant__ extern
+#endif
+    HIPLockArrays g_device_hip_lock_arrays;
+
+#define KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK 0x1FFFF
+
+/// \brief Acquire a lock for the address
+///
+/// This function tries to acquire the lock for the hash value derived
+/// from the provided ptr. If the lock is successfully acquired the
+/// function returns true. Otherwise it returns false.
+__device__ inline bool lock_address_hip_space(void* ptr) {
+  auto offset = reinterpret_cast<size_t>(ptr);
+  offset      = offset >> 2;
+  offset      = offset & KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK;
+  return (0 == atomicCAS(&g_device_hip_lock_arrays.atomic[offset], 0, 1));
+}
+
+/// \brief Release lock for the address
+///
+/// This function releases the lock for the hash value derived
+/// from the provided ptr. This function should only be called
+/// after previously successfully acquiring the lock with
+/// lock_address_hip_space.
+__device__ inline void unlock_address_hip_space(void* ptr) {
+  auto offset = reinterpret_cast<size_t>(ptr);
+  offset      = offset >> 2;
+  offset      = offset & KOKKOS_IMPL_HIP_SPACE_ATOMIC_MASK;
+  atomicExch(&g_device_hip_lock_arrays.atomic[offset], 0);
+}
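+
+/// A minimal sketch of how the pair above is meant to be used from device
+/// code. Real call sites use a try-loop rather than a bare spin so other
+/// lanes of the warp can make progress:
+///
+///   bool done = false;
+///   while (!done) {
+///     if (lock_address_hip_space(ptr)) {
+///       // ... non-atomic read-modify-write on the value behind ptr ...
+///       unlock_address_hip_space(ptr);
+///       done = true;
+///     }
+///   }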
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+// Give lock_array_copied explicit translation-unit scope
+namespace Kokkos {
+namespace Impl {
+namespace {
+static int lock_array_copied = 0;
+inline int eliminate_warning_for_lock_array() { return lock_array_copied; }
+}  // namespace
+}  // namespace Impl
+}  // namespace Kokkos
+
+/* Dan Ibanez: it is critical that this code be a macro, so that each
+   translation unit captures the right address for g_device_hip_lock_arrays!
+   Putting this in an inline function will NOT do the right thing! */
+#define KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE()                 \
+  {                                                             \
+    if (::Kokkos::Impl::lock_array_copied == 0) {               \
+      KOKKOS_IMPL_HIP_SAFE_CALL(hipMemcpyToSymbol(              \
+          HIP_SYMBOL(::Kokkos::Impl::g_device_hip_lock_arrays), \
+          &::Kokkos::Impl::g_host_hip_lock_arrays,              \
+          sizeof(::Kokkos::Impl::HIPLockArrays)));              \
+    }                                                           \
+    ::Kokkos::Impl::lock_array_copied = 1;                      \
+  }
+
+#ifndef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+
+#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
+#define KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE()
+#else
+#define KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE() \
+  KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE()
+#endif
+
+#else
+
+#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
+#define KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE()
+#else
+// Still need KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE for team scratch etc.
+#define KOKKOS_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE() \
+  KOKKOS_COPY_HIP_LOCK_ARRAYS_TO_DEVICE()         \
+  DESUL_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE()
+#endif
+
+#endif /* defined( KOKKOS_ENABLE_IMPL_DESUL_ATOMICS ) */
+
+#endif /* defined( __HIPCC__ ) */
+
+#endif /* #ifndef KOKKOS_HIP_LOCKS_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_MDRangePolicy.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_MDRangePolicy.hpp
new file mode 100644 (file)
index 0000000..acb538e
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef KOKKOS_HIP_MDRANGEPOLICY_HPP_
+#define KOKKOS_HIP_MDRANGEPOLICY_HPP_
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+
+template <>
+struct default_outer_direction<Kokkos::Experimental::HIP> {
+  using type                     = Iterate;
+  static constexpr Iterate value = Iterate::Left;
+};
+
+template <>
+struct default_inner_direction<Kokkos::Experimental::HIP> {
+  using type                     = Iterate;
+  static constexpr Iterate value = Iterate::Left;
+};
+
+namespace Impl {
+
+// Settings for MDRangePolicy
+template <>
+inline TileSizeProperties get_tile_size_properties<Kokkos::Experimental::HIP>(
+    const Kokkos::Experimental::HIP& space) {
+  TileSizeProperties properties;
+  properties.max_threads =
+      space.impl_internal_space_instance()->m_maxThreadsPerSM;
+  properties.default_largest_tile_size = 16;
+  properties.default_tile_size         = 4;
+  properties.max_total_tile_size =
+      Kokkos::Experimental::Impl::HIPTraits::MaxThreadsPerBlock;
+  return properties;
+}
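+
+// Illustrative effect of these properties, assuming a rank-2 MDRangePolicy:
+// when the user gives no tile sizes, extents are chosen from
+// default_largest_tile_size (16) and default_tile_size (4), and the product
+// of the tile extents is capped at MaxThreadsPerBlock (1024). Explicit tiles
+// can still be requested:
+//
+//   // MDRangePolicy<HIP, Rank<2>>(space, {0, 0}, {N0, N1}, {16, 16});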
+
+}  // namespace Impl
+}  // namespace Kokkos
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Parallel_MDRange.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Parallel_MDRange.hpp
new file mode 100644 (file)
index 0000000..212bbb9
--- /dev/null
@@ -0,0 +1,451 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_PARALLEL_MDRANGE_HPP
+#define KOKKOS_HIP_PARALLEL_MDRANGE_HPP
+
+#include <HIP/Kokkos_HIP_BlockSize_Deduction.hpp>
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+#include <HIP/Kokkos_HIP_ReduceScan.hpp>
+#include <KokkosExp_MDRangePolicy.hpp>
+#include <impl/KokkosExp_IterateTileGPU.hpp>
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos {
+namespace Impl {
+// ParallelFor
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+                  Kokkos::Experimental::HIP> {
+ public:
+  using Policy = Kokkos::MDRangePolicy<Traits...>;
+
+ private:
+  using array_index_type = typename Policy::array_index_type;
+  using index_type       = typename Policy::index_type;
+  using LaunchBounds     = typename Policy::launch_bounds;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  ParallelFor()        = delete;
+  ParallelFor& operator=(ParallelFor const&) = delete;
+
+ public:
+  inline __device__ void operator()() const {
+    Kokkos::Impl::DeviceIterateTile<Policy::rank, Policy, FunctorType,
+                                    typename Policy::work_tag>(m_policy,
+                                                               m_functor)
+        .exec_range();
+  }
+
+  inline void execute() const {
+    using ClosureType =
+        ParallelFor<FunctorType, Policy, Kokkos::Experimental::HIP>;
+    if (m_policy.m_num_tiles == 0) return;
+    auto const maxblocks =
+        Kokkos::Experimental::Impl::hip_internal_maximum_grid_count();
+    if (Policy::rank == 2) {
+      dim3 const block(m_policy.m_tile[0], m_policy.m_tile[1], 1);
+      dim3 const grid(
+          std::min<array_index_type>(
+              (m_policy.m_upper[0] - m_policy.m_lower[0] + block.x - 1) /
+                  block.x,
+              maxblocks[0]),
+          std::min<array_index_type>(
+              (m_policy.m_upper[1] - m_policy.m_lower[1] + block.y - 1) /
+                  block.y,
+              maxblocks[1]),
+          1);
+      Kokkos::Experimental::Impl::hip_parallel_launch<ClosureType,
+                                                      LaunchBounds>(
+          *this, grid, block, 0,
+          m_policy.space().impl_internal_space_instance(), false);
+    } else if (Policy::rank == 3) {
+      dim3 const block(m_policy.m_tile[0], m_policy.m_tile[1],
+                       m_policy.m_tile[2]);
+      dim3 const grid(
+          std::min<array_index_type>(
+              (m_policy.m_upper[0] - m_policy.m_lower[0] + block.x - 1) /
+                  block.x,
+              maxblocks[0]),
+          std::min<array_index_type>(
+              (m_policy.m_upper[1] - m_policy.m_lower[1] + block.y - 1) /
+                  block.y,
+              maxblocks[1]),
+          std::min<array_index_type>(
+              (m_policy.m_upper[2] - m_policy.m_lower[2] + block.z - 1) /
+                  block.z,
+              maxblocks[2]));
+      Kokkos::Experimental::Impl::hip_parallel_launch<ClosureType,
+                                                      LaunchBounds>(
+          *this, grid, block, 0,
+          m_policy.space().impl_internal_space_instance(), false);
+    } else if (Policy::rank == 4) {
+      // id0,id1 encoded within threadIdx.x; id2 to threadIdx.y; id3 to
+      // threadIdx.z
+      dim3 const block(m_policy.m_tile[0] * m_policy.m_tile[1],
+                       m_policy.m_tile[2], m_policy.m_tile[3]);
+      dim3 const grid(
+          std::min<array_index_type>(
+              m_policy.m_tile_end[0] * m_policy.m_tile_end[1], maxblocks[0]),
+          std::min<array_index_type>(
+              (m_policy.m_upper[2] - m_policy.m_lower[2] + block.y - 1) /
+                  block.y,
+              maxblocks[1]),
+          std::min<array_index_type>(
+              (m_policy.m_upper[3] - m_policy.m_lower[3] + block.z - 1) /
+                  block.z,
+              maxblocks[2]));
+      Kokkos::Experimental::Impl::hip_parallel_launch<ClosureType,
+                                                      LaunchBounds>(
+          *this, grid, block, 0,
+          m_policy.space().impl_internal_space_instance(), false);
+    } else if (Policy::rank == 5) {
+      // id0,id1 encoded within threadIdx.x; id2,id3 to threadIdx.y; id4
+      // to threadIdx.z
+      dim3 const block(m_policy.m_tile[0] * m_policy.m_tile[1],
+                       m_policy.m_tile[2] * m_policy.m_tile[3],
+                       m_policy.m_tile[4]);
+      dim3 const grid(
+          std::min<array_index_type>(
+              m_policy.m_tile_end[0] * m_policy.m_tile_end[1], maxblocks[0]),
+          std::min<array_index_type>(
+              m_policy.m_tile_end[2] * m_policy.m_tile_end[3], maxblocks[1]),
+          std::min<array_index_type>(
+              (m_policy.m_upper[4] - m_policy.m_lower[4] + block.z - 1) /
+                  block.z,
+              maxblocks[2]));
+      Kokkos::Experimental::Impl::hip_parallel_launch<ClosureType,
+                                                      LaunchBounds>(
+          *this, grid, block, 0,
+          m_policy.space().impl_internal_space_instance(), false);
+    } else if (Policy::rank == 6) {
+      // id0,id1 encoded within threadIdx.x; id2,id3 to threadIdx.y;
+      // id4,id5 to threadIdx.z
+      dim3 const block(m_policy.m_tile[0] * m_policy.m_tile[1],
+                       m_policy.m_tile[2] * m_policy.m_tile[3],
+                       m_policy.m_tile[4] * m_policy.m_tile[5]);
+      dim3 const grid(
+          std::min<array_index_type>(
+              m_policy.m_tile_end[0] * m_policy.m_tile_end[1], maxblocks[0]),
+          std::min<array_index_type>(
+              m_policy.m_tile_end[2] * m_policy.m_tile_end[3], maxblocks[1]),
+          std::min<array_index_type>(
+              m_policy.m_tile_end[4] * m_policy.m_tile_end[5], maxblocks[2]));
+      Kokkos::Experimental::Impl::hip_parallel_launch<ClosureType,
+                                                      LaunchBounds>(
+          *this, grid, block, 0,
+          m_policy.space().impl_internal_space_instance(), false);
+    } else {
+      Kokkos::abort("Kokkos::MDRange Error: Exceeded rank bounds with HIP\n");
+    }
+
+  }  // end execute
+
+  ParallelFor(FunctorType const& arg_functor, Policy const& arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy&, const Functor&) {
+    using closure_type =
+        ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+                    Kokkos::Experimental::HIP>;
+    unsigned block_size =
+        Kokkos::Experimental::Impl::hip_get_max_blocksize<closure_type,
+                                                          LaunchBounds>();
+    if (block_size == 0)
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelFor< HIP > could not find a valid "
+                      "tile size."));
+    return block_size;
+  }
+};
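+
+// For orientation, a minimal usage sketch (illustrative names only, not part
+// of this header): a rank-2 MDRange parallel_for served by the specialization
+// above, where each tile becomes one HIP thread block as in the
+// Policy::rank == 2 branch of execute().
+//
+//   Kokkos::View<double**> a("a", n, m);
+//   Kokkos::parallel_for(
+//       Kokkos::MDRangePolicy<Kokkos::Experimental::HIP, Kokkos::Rank<2>>(
+//           {0, 0}, {n, m}),
+//       KOKKOS_LAMBDA(int i, int j) { a(i, j) *= 2.0; });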
+
+// ParallelReduce
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
+                     Kokkos::Experimental::HIP> {
+ public:
+  using Policy = Kokkos::MDRangePolicy<Traits...>;
+
+ private:
+  using array_index_type = typename Policy::array_index_type;
+  using index_type       = typename Policy::index_type;
+
+  using WorkTag      = typename Policy::work_tag;
+  using Member       = typename Policy::member_type;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                                  WorkTag, void>::type;
+
+  using Analysis =
+      Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy,
+                                    ReducerTypeFwd>;
+
+ public:
+  using pointer_type   = typename Analysis::pointer_type;
+  using value_type     = typename Analysis::value_type;
+  using reference_type = typename Analysis::reference_type;
+  using functor_type   = FunctorType;
+  using size_type      = Experimental::HIP::size_type;
+
+  // Algorithmic constraints: blockDim.y is a power of two AND blockDim.x ==
+  // blockDim.z == 1
+
+  const FunctorType m_functor;
+  const Policy m_policy;  // used for workrange and nwork
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  const bool m_result_ptr_device_accessible;
+  size_type* m_scratch_space;
+  size_type* m_scratch_flags;
+  // Only let one ParallelReduce/Scan modify the shared memory. The
+  // constructor acquires the mutex which is released in the destructor.
+  std::lock_guard<std::mutex> m_shared_memory_lock;
+
+  using DeviceIteratePattern = typename Kokkos::Impl::Reduce::DeviceIterateTile<
+      Policy::rank, Policy, FunctorType, WorkTag, reference_type>;
+
+ public:
+  inline __device__ void exec_range(reference_type update) const {
+    DeviceIteratePattern(m_policy, m_functor, update).exec_range();
+  }
+
+  inline __device__ void operator()() const {
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
+                                                   sizeof(size_type)>
+        word_count(Analysis::value_size(
+                       ReducerConditional::select(m_functor, m_reducer)) /
+                   sizeof(size_type));
+
+    {
+      reference_type value = final_reducer.init(reinterpret_cast<pointer_type>(
+          Experimental::kokkos_impl_hip_shared_memory<size_type>() +
+          threadIdx.y * word_count.value));
+
+      // Number of blocks is bounded so that the reduction can be limited to two
+      // passes. Each thread block is given an approximately equal amount of
+      // work to perform. Accumulate the values for this block. The accumulation
+      // ordering does not match the final pass, but is arithmetically
+      // equivalent.
+
+      this->exec_range(value);
+    }
+
+    // Reduce with final value at blockDim.y - 1 location.
+    // Problem: non power-of-two blockDim
+    if (::Kokkos::Impl::hip_single_inter_block_reduce_scan<false>(
+            final_reducer, blockIdx.x, gridDim.x,
+            Experimental::kokkos_impl_hip_shared_memory<size_type>(),
+            m_scratch_space, m_scratch_flags)) {
+      // This is the final block with the final result at the final threads'
+      // location
+      size_type* const shared =
+          Experimental::kokkos_impl_hip_shared_memory<size_type>() +
+          (blockDim.y - 1) * word_count.value;
+      size_type* const global = m_result_ptr_device_accessible
+                                    ? reinterpret_cast<size_type*>(m_result_ptr)
+                                    : m_scratch_space;
+
+      if (threadIdx.y == 0) {
+        final_reducer.final(reinterpret_cast<value_type*>(shared));
+      }
+
+      if (Experimental::Impl::HIPTraits::WarpSize < word_count.value) {
+        __syncthreads();
+      }
+
+      for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
+        global[i] = shared[i];
+      }
+    }
+  }
+
+  // Determine block size constrained by shared memory:
+  // This is copy/paste from Kokkos_HIP_Parallel_Range
+  inline unsigned local_block_size(const FunctorType& f) {
+    const auto& instance = m_policy.space().impl_internal_space_instance();
+    auto shmem_functor   = [&f](unsigned n) {
+      return hip_single_inter_block_reduce_scan_shmem<false, FunctorType,
+                                                      WorkTag>(f, n);
+    };
+    using closure_type = ParallelReduce<FunctorType, Policy, ReducerType,
+                                        Kokkos::Experimental::HIP>;
+
+    unsigned block_size =
+        Kokkos::Experimental::Impl::hip_get_preferred_blocksize<closure_type,
+                                                                LaunchBounds>(
+            instance, shmem_functor);
+    if (block_size == 0) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< HIP > could not find a "
+                      "valid tile size."));
+    }
+    return block_size;
+  }
+
+  inline void execute() {
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    using ClosureType = ParallelReduce<FunctorType, Policy, ReducerType,
+                                       Kokkos::Experimental::HIP>;
+    const auto nwork  = m_policy.m_num_tiles;
+    if (nwork) {
+      int block_size = m_policy.m_prod_tile_dims;
+      // CONSTRAINT: Algorithm requires block_size >= product of tile dimensions
+      // Nearest power of two
+      int exponent_pow_two    = std::ceil(std::log2(block_size));
+      block_size              = std::pow(2, exponent_pow_two);
+      int suggested_blocksize = local_block_size(m_functor);
+
+      block_size = (block_size > suggested_blocksize)
+                       ? block_size
+                       : suggested_blocksize;  // Note: block_size must be less
+                                               // than or equal to 512
+
+      m_scratch_space =
+          ::Kokkos::Experimental::Impl::hip_internal_scratch_space(
+              m_policy.space(),
+              Analysis::value_size(
+                  ReducerConditional::select(m_functor, m_reducer)) *
+                  block_size /* block_size == max block_count */);
+      m_scratch_flags =
+          ::Kokkos::Experimental::Impl::hip_internal_scratch_flags(
+              m_policy.space(), sizeof(size_type));
+
+      // REQUIRED ( 1 , N , 1 )
+      const dim3 block(1, block_size, 1);
+      // Required grid.x <= block.y
+      const dim3 grid(std::min(static_cast<uint32_t>(block.y),
+                               static_cast<uint32_t>(nwork)),
+                      1, 1);
+
+      const int shmem =
+          ::Kokkos::Impl::hip_single_inter_block_reduce_scan_shmem<
+              false, FunctorType, WorkTag>(m_functor, block.y);
+
+      Kokkos::Experimental::Impl::hip_parallel_launch<ClosureType,
+                                                      LaunchBounds>(
+          *this, grid, block, shmem,
+          m_policy.space().impl_internal_space_instance(),
+          false);  // copy to device and execute
+
+      if (!m_result_ptr_device_accessible && m_result_ptr) {
+        const int size = Analysis::value_size(
+            ReducerConditional::select(m_functor, m_reducer));
+        DeepCopy<HostSpace, Experimental::HIPSpace, Experimental::HIP>(
+            m_policy.space(), m_result_ptr, m_scratch_space, size);
+      }
+    } else {
+      if (m_result_ptr) {
+        final_reducer.init(m_result_ptr);
+      }
+    }
+  }
+
+  template <class ViewType>
+  ParallelReduce(
+      const FunctorType& arg_functor, const Policy& arg_policy,
+      const ViewType& arg_result,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result.data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_scratch_space(nullptr),
+        m_scratch_flags(nullptr),
+        m_shared_memory_lock(m_policy.space()
+                                 .impl_internal_space_instance()
+                                 ->m_mutexSharedMemory) {}
+
+  ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
+                 const ReducerType& reducer)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_scratch_space(nullptr),
+        m_scratch_flags(nullptr),
+        m_shared_memory_lock(m_policy.space()
+                                 .impl_internal_space_instance()
+                                 ->m_mutexSharedMemory) {}
+
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy&, const Functor&) {
+    using closure_type =
+        ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+                       ReducerType, Kokkos::Experimental::HIP>;
+    unsigned block_size =
+        Kokkos::Experimental::Impl::hip_get_max_blocksize<closure_type,
+                                                          LaunchBounds>();
+    if (block_size == 0) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< HIP > could not find a "
+                      "valid tile size."));
+    }
+    return block_size;
+  }
+};
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
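For orientation, a minimal usage sketch of the MDRange reduction path above
(illustrative names only, not from the bundled sources); the (1, block_size, 1)
launch and the two-pass inter-block reduction stay hidden behind
Kokkos::parallel_reduce:

    #include <Kokkos_Core.hpp>

    // Sum the entries of a rank-2 view using the HIP MDRange ParallelReduce.
    double sum2d(Kokkos::View<const double**> a) {
      double result = 0.0;
      Kokkos::parallel_reduce(
          Kokkos::MDRangePolicy<Kokkos::Experimental::HIP, Kokkos::Rank<2>>(
              {0, 0}, {a.extent_int(0), a.extent_int(1)}),
          KOKKOS_LAMBDA(int i, int j, double& update) { update += a(i, j); },
          result);
      return result;
    }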
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Parallel_Range.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Parallel_Range.hpp
new file mode 100644 (file)
index 0000000..5c871e0
--- /dev/null
@@ -0,0 +1,771 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_PARALLEL_RANGE_HPP
+#define KOKKOS_HIP_PARALLEL_RANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#if defined(__HIPCC__)
+
+#include <HIP/Kokkos_HIP_BlockSize_Deduction.hpp>
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+#include <HIP/Kokkos_HIP_ReduceScan.hpp>
+#include <HIP/Kokkos_HIP_Shuffle_Reduce.hpp>
+#include <impl/Kokkos_Traits.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
+                  Kokkos::Experimental::HIP> {
+ public:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+ private:
+  using Member       = typename Policy::member_type;
+  using WorkTag      = typename Policy::work_tag;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  ParallelFor()        = delete;
+  ParallelFor& operator=(const ParallelFor&) = delete;
+
+  template <class TagType>
+  inline __device__ std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const Member i) const {
+    m_functor(i);
+  }
+
+  template <class TagType>
+  inline __device__ std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const Member i) const {
+    m_functor(TagType(), i);
+  }
+
+ public:
+  using functor_type = FunctorType;
+
+  inline __device__ void operator()() const {
+    const Member work_stride = blockDim.y * gridDim.x;
+    const Member work_end    = m_policy.end();
+
+    for (Member iwork =
+             m_policy.begin() + threadIdx.y + blockDim.y * blockIdx.x;
+         iwork < work_end;
+         iwork = iwork < work_end - work_stride ? iwork + work_stride
+                                                : work_end) {
+      this->template exec_range<WorkTag>(iwork);
+    }
+  }
+
+  inline void execute() const {
+    const typename Policy::index_type nwork = m_policy.end() - m_policy.begin();
+
+    using DriverType =
+        ParallelFor<FunctorType, Policy, Kokkos::Experimental::HIP>;
+    const int block_size =
+        Kokkos::Experimental::Impl::hip_get_preferred_blocksize<DriverType,
+                                                                LaunchBounds>();
+    const dim3 block(1, block_size, 1);
+    const dim3 grid(
+        typename Policy::index_type((nwork + block.y - 1) / block.y), 1, 1);
+
+    if (block_size == 0) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelFor< HIP > could not find a "
+                      "valid execution configuration."));
+    }
+    Kokkos::Experimental::Impl::hip_parallel_launch<DriverType, LaunchBounds>(
+        *this, grid, block, 0, m_policy.space().impl_internal_space_instance(),
+        false);
+  }
+
+  ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+};
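+
+// Usage sketch (illustrative names only, not part of this header): an
+// ordinary RangePolicy parallel_for served by the specialization above;
+// operator()() strides by blockDim.y * gridDim.x, so any range length is
+// covered by a bounded grid.
+//
+//   Kokkos::View<double*> x("x", n);
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<Kokkos::Experimental::HIP>(0, n),
+//       KOKKOS_LAMBDA(int i) { x(i) *= 2.0; });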
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
+                     Kokkos::Experimental::HIP> {
+ public:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+ private:
+  using WorkRange    = typename Policy::WorkRange;
+  using WorkTag      = typename Policy::work_tag;
+  using Member       = typename Policy::member_type;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                                  WorkTag, void>::type;
+
+  using Analysis =
+      Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy,
+                                    ReducerTypeFwd>;
+
+ public:
+  using pointer_type   = typename Analysis::pointer_type;
+  using value_type     = typename Analysis::value_type;
+  using reference_type = typename Analysis::reference_type;
+  using functor_type   = FunctorType;
+  using size_type      = Kokkos::Experimental::HIP::size_type;
+  using index_type     = typename Policy::index_type;
+
+  // Algorithmic constraints: blockDim.y is a power of two AND blockDim.x ==
+  // blockDim.z == 1
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  const bool m_result_ptr_device_accessible;
+  const bool m_result_ptr_host_accessible;
+  size_type* m_scratch_space = nullptr;
+  size_type* m_scratch_flags = nullptr;
+  // Only let one ParallelReduce/Scan modify the shared memory. The
+  // constructor acquires the mutex which is released in the destructor.
+  std::lock_guard<std::mutex> m_shared_memory_lock;
+
+  static bool constexpr UseShflReduction =
+      static_cast<bool>(Analysis::StaticValueSize);
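+  // Shuffle-based reduction requires a value size that is known at compile
+  // time (StaticValueSize != 0); otherwise run() dispatches to the
+  // shared-memory path via SHMEMReductionTag.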
+
+ private:
+  struct ShflReductionTag {};
+  struct SHMEMReductionTag {};
+
+  // Tagged and untagged exec_range overloads forward to the user functor
+  template <class TagType>
+  __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const Member& i, reference_type update) const {
+    m_functor(i, update);
+  }
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const Member& i, reference_type update) const {
+    m_functor(TagType(), i, update);
+  }
+
+ public:
+  __device__ inline void operator()() const {
+    using ReductionTag = std::conditional_t<UseShflReduction, ShflReductionTag,
+                                            SHMEMReductionTag>;
+    run(ReductionTag{});
+  }
+
+  __device__ inline void run(SHMEMReductionTag) const {
+    const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
+                                                   sizeof(size_type)>
+        word_count(Analysis::value_size(
+                       ReducerConditional::select(m_functor, m_reducer)) /
+                   sizeof(size_type));
+
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+    {
+      reference_type value = final_reducer.init(reinterpret_cast<pointer_type>(
+          ::Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>() +
+          threadIdx.y * word_count.value));
+
+      // Number of blocks is bounded so that the reduction can be limited to two
+      // passes. Each thread block is given an approximately equal amount of
+      // work to perform. Accumulate the values for this block. The accumulation
+      // ordering does not match the final pass, but is arithmetically
+      // equivalent.
+
+      const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+      for (Member iwork = range.begin() + threadIdx.y, iwork_end = range.end();
+           iwork < iwork_end; iwork += blockDim.y) {
+        this->template exec_range<WorkTag>(iwork, value);
+      }
+    }
+
+    // Reduce with final value at blockDim.y - 1 location.
+    // Shortcut for length zero reduction
+    bool do_final_reduction = m_policy.begin() == m_policy.end();
+    if (!do_final_reduction)
+      do_final_reduction = hip_single_inter_block_reduce_scan<false>(
+          final_reducer, blockIdx.x, gridDim.x,
+          ::Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>(),
+          m_scratch_space, m_scratch_flags);
+    if (do_final_reduction) {
+      // This is the final block with the final result at the final threads'
+      // location
+
+      size_type* const shared =
+          ::Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>() +
+          (blockDim.y - 1) * word_count.value;
+      size_type* const global = m_result_ptr_device_accessible
+                                    ? reinterpret_cast<size_type*>(m_result_ptr)
+                                    : m_scratch_space;
+
+      if (threadIdx.y == 0) {
+        final_reducer.final(reinterpret_cast<value_type*>(shared));
+      }
+
+      if (::Kokkos::Experimental::Impl::HIPTraits::WarpSize <
+          word_count.value) {
+        __syncthreads();
+      }
+
+      for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
+        global[i] = shared[i];
+      }
+    }
+  }
+
+  __device__ inline void run(ShflReductionTag) const {
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    value_type value;
+    final_reducer.init(&value);
+    // Number of blocks is bounded so that the reduction can be limited to two
+    // passes. Each thread block is given an approximately equal amount of work
+    // to perform. Accumulate the values for this block. The accumulation
+    // ordering does not match the final pass, but is arithmetically equivalent.
+
+    WorkRange const range(m_policy, blockIdx.x, gridDim.x);
+
+    for (Member iwork = range.begin() + threadIdx.y, iwork_end = range.end();
+         iwork < iwork_end; iwork += blockDim.y) {
+      this->template exec_range<WorkTag>(iwork, value);
+    }
+
+    pointer_type const result = reinterpret_cast<pointer_type>(m_scratch_space);
+
+    int max_active_thread = static_cast<int>(range.end() - range.begin()) <
+                                    static_cast<int>(blockDim.y)
+                                ? range.end() - range.begin()
+                                : blockDim.y;
+
+    max_active_thread =
+        (max_active_thread == 0) ? blockDim.y : max_active_thread;
+
+    value_type init;
+    final_reducer.init(&init);
+    if (m_policy.begin() == m_policy.end()) {
+      final_reducer.final(&value);
+      pointer_type const final_result =
+          m_result_ptr_device_accessible ? m_result_ptr : result;
+      *final_result = value;
+    } else if (Impl::hip_inter_block_shuffle_reduction<>(
+                   value, init, final_reducer, m_scratch_space, result,
+                   m_scratch_flags, max_active_thread)) {
+      unsigned int const id = threadIdx.y * blockDim.x + threadIdx.x;
+      if (id == 0) {
+        final_reducer.final(&value);
+        pointer_type const final_result =
+            m_result_ptr_device_accessible ? m_result_ptr : result;
+        *final_result = value;
+      }
+    }
+  }
+
+  // Determine block size constrained by shared memory:
+  inline unsigned local_block_size(const FunctorType& f) {
+    const auto& instance = m_policy.space().impl_internal_space_instance();
+    auto shmem_functor   = [&f](unsigned n) {
+      return hip_single_inter_block_reduce_scan_shmem<false, FunctorType,
+                                                      WorkTag>(f, n);
+    };
+    using DriverType = ParallelReduce<FunctorType, Policy, ReducerType,
+                                      Kokkos::Experimental::HIP>;
+    return Kokkos::Experimental::Impl::hip_get_preferred_blocksize<
+        DriverType, LaunchBounds>(instance, shmem_functor);
+  }
+
+  inline void execute() {
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    const index_type nwork     = m_policy.end() - m_policy.begin();
+    const bool need_device_set = Analysis::has_init_member_function ||
+                                 Analysis::has_final_member_function ||
+                                 !m_result_ptr_host_accessible ||
+                                 !std::is_same<ReducerType, InvalidType>::value;
+    if ((nwork > 0) || need_device_set) {
+      const int block_size = local_block_size(m_functor);
+      if (block_size == 0) {
+        Kokkos::Impl::throw_runtime_exception(
+            std::string("Kokkos::Impl::ParallelReduce< HIP > could not find a "
+                        "valid execution configuration."));
+      }
+
+      m_scratch_space =
+          ::Kokkos::Experimental::Impl::hip_internal_scratch_space(
+              m_policy.space(),
+              Analysis::value_size(
+                  ReducerConditional::select(m_functor, m_reducer)) *
+                  block_size /* block_size == max block_count */);
+      m_scratch_flags =
+          ::Kokkos::Experimental::Impl::hip_internal_scratch_flags(
+              m_policy.space(), sizeof(size_type));
+
+      // REQUIRED ( 1 , N , 1 )
+      dim3 block(1, block_size, 1);
+      // Required grid.x <= block.y
+      dim3 grid(std::min(block.y, static_cast<uint32_t>((nwork + block.y - 1) /
+                                                        block.y)),
+                1, 1);
+
+      if (nwork == 0) {
+        block = dim3(1, 1, 1);
+        grid  = dim3(1, 1, 1);
+      }
+      const int shmem =
+          UseShflReduction
+              ? 0
+              : hip_single_inter_block_reduce_scan_shmem<false, FunctorType,
+                                                         WorkTag>(m_functor,
+                                                                  block.y);
+
+      using DriverType = ParallelReduce<FunctorType, Policy, ReducerType,
+                                        Kokkos::Experimental::HIP>;
+      Kokkos::Experimental::Impl::hip_parallel_launch<DriverType, LaunchBounds>(
+          *this, grid, block, shmem,
+          m_policy.space().impl_internal_space_instance(),
+          false);  // copy to device and execute
+
+      if (!m_result_ptr_device_accessible && m_result_ptr) {
+        const int size = Analysis::value_size(
+            ReducerConditional::select(m_functor, m_reducer));
+        DeepCopy<HostSpace, ::Kokkos::Experimental::HIPSpace,
+                 ::Kokkos::Experimental::HIP>(m_policy.space(), m_result_ptr,
+                                              m_scratch_space, size);
+      }
+    } else {
+      if (m_result_ptr) {
+        final_reducer.init(m_result_ptr);
+      }
+    }
+  }
+
+  template <class ViewType>
+  ParallelReduce(
+      const FunctorType& arg_functor, const Policy& arg_policy,
+      const ViewType& arg_result,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result.data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_result_ptr_host_accessible(
+            MemorySpaceAccess<Kokkos::HostSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_shared_memory_lock(m_policy.space()
+                                 .impl_internal_space_instance()
+                                 ->m_mutexSharedMemory) {}
+
+  ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
+                 const ReducerType& reducer)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_result_ptr_host_accessible(
+            MemorySpaceAccess<Kokkos::HostSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_shared_memory_lock(m_policy.space()
+                                 .impl_internal_space_instance()
+                                 ->m_mutexSharedMemory) {}
+};
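+
+// Usage sketch (illustrative names only, not part of this header): a dot
+// product served by the specialization above; with a host scalar result, the
+// DeepCopy at the end of execute() copies the value back from scratch space.
+//
+//   double result = 0.;
+//   Kokkos::parallel_reduce(
+//       Kokkos::RangePolicy<Kokkos::Experimental::HIP>(0, n),
+//       KOKKOS_LAMBDA(int i, double& update) { update += x(i) * y(i); },
+//       result);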
+
+template <class FunctorType, class... Traits>
+class ParallelScanHIPBase {
+ public:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+ protected:
+  using Member       = typename Policy::member_type;
+  using WorkTag      = typename Policy::work_tag;
+  using WorkRange    = typename Policy::WorkRange;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+  using Analysis = Kokkos::Impl::FunctorAnalysis<FunctorPatternInterface::SCAN,
+                                                 Policy, FunctorType>;
+
+ public:
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+  using functor_type   = FunctorType;
+  using size_type      = Kokkos::Experimental::HIP::size_type;
+  using index_type     = typename Policy::index_type;
+
+ protected:
+  // Algorithmic constraints:
+  //  (a) blockDim.y is a power of two
+  //  (b) blockDim.x == blockDim.z == 1
+  //  (c) gridDim.x  <= blockDim.y * blockDim.y
+  //  (d) gridDim.y  == gridDim.z == 1
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  size_type* m_scratch_space = nullptr;
+  size_type* m_scratch_flags = nullptr;
+  size_type m_final          = false;
+  int m_grid_x               = 0;
+  // Only let one ParallelReduce/Scan modify the shared memory. The
+  // constructor acquires the mutex which is released in the destructor.
+  std::lock_guard<std::mutex> m_shared_memory_lock;
+
+ private:
+  template <class TagType>
+  __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const Member& i, reference_type update, const bool final_result) const {
+    m_functor(i, update, final_result);
+  }
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const Member& i, reference_type update, const bool final_result) const {
+    m_functor(TagType(), i, update, final_result);
+  }
+
+  //----------------------------------------
+
+  __device__ inline void initial() const {
+    typename Analysis::Reducer final_reducer(&m_functor);
+
+    const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
+                                                   sizeof(size_type)>
+        word_count(Analysis::value_size(m_functor) / sizeof(size_type));
+
+    pointer_type const shared_value = reinterpret_cast<pointer_type>(
+        Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>() +
+        word_count.value * threadIdx.y);
+
+    final_reducer.init(shared_value);
+
+    // Number of blocks is bounded so that the reduction can be limited to two
+    // passes. Each thread block is given an approximately equal amount of work
+    // to perform. Accumulate the values for this block. The accumulation
+    // ordering does not match the final pass, but is arithmetically equivalent.
+
+    const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+    for (Member iwork = range.begin() + threadIdx.y, iwork_end = range.end();
+         iwork < iwork_end; iwork += blockDim.y) {
+      this->template exec_range<WorkTag>(
+          iwork, final_reducer.reference(shared_value), false);
+    }
+
+    // Reduce and scan, writing out scan of blocks' totals and block-groups'
+    // totals. Blocks' scan values are written to 'blockIdx.x' location.
+    // Block-groups' scan values are at: i = ( j * blockDim.y - 1 ) for i <
+    // gridDim.x
+    hip_single_inter_block_reduce_scan<true>(
+        final_reducer, blockIdx.x, gridDim.x,
+        Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>(),
+        m_scratch_space, m_scratch_flags);
+  }
+
+  //----------------------------------------
+
+  __device__ inline void final() const {
+    typename Analysis::Reducer final_reducer(&m_functor);
+
+    const integral_nonzero_constant<size_type, Analysis::StaticValueSize /
+                                                   sizeof(size_type)>
+        word_count(Analysis::value_size(m_functor) / sizeof(size_type));
+
+    // Use shared memory as an exclusive scan: { 0 , value[0] , value[1] ,
+    // value[2] , ... }
+    size_type* const shared_data =
+        Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>();
+    size_type* const shared_prefix =
+        shared_data + word_count.value * threadIdx.y;
+    size_type* const shared_accum =
+        shared_data + word_count.value * (blockDim.y + 1);
+
+    // Starting value for this thread block is the previous block's total.
+    if (blockIdx.x) {
+      size_type* const block_total =
+          m_scratch_space + word_count.value * (blockIdx.x - 1);
+      for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
+        shared_accum[i] = block_total[i];
+      }
+    } else if (0 == threadIdx.y) {
+      final_reducer.init(reinterpret_cast<pointer_type>(shared_accum));
+    }
+
+    const WorkRange range(m_policy, blockIdx.x, gridDim.x);
+
+    for (typename Policy::member_type iwork_base = range.begin();
+         iwork_base < range.end(); iwork_base += blockDim.y) {
+      const typename Policy::member_type iwork = iwork_base + threadIdx.y;
+
+      __syncthreads();  // Don't overwrite previous iteration values until they
+                        // are used
+
+      final_reducer.init(
+          reinterpret_cast<pointer_type>(shared_prefix + word_count.value));
+
+      // Copy previous block's accumulation total into thread[0] prefix and
+      // inclusive scan value of this block
+      for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
+        shared_data[i + word_count.value] = shared_data[i] = shared_accum[i];
+      }
+
+      // Make sure the write is seen by all threads
+      __threadfence_block();
+
+      // Call functor to accumulate inclusive scan value for this work item
+      const bool doWork = (iwork < range.end());
+      if (doWork) {
+        this->template exec_range<WorkTag>(
+            iwork,
+            final_reducer.reference(reinterpret_cast<pointer_type>(
+                shared_prefix + word_count.value)),
+            false);
+      }
+
+      // Scan block values into locations shared_data[1..blockDim.y]
+      hip_intra_block_reduce_scan<true>(
+          final_reducer,
+          typename Analysis::pointer_type(shared_data + word_count.value));
+
+      {
+        size_type* const block_total =
+            shared_data + word_count.value * blockDim.y;
+        for (unsigned i = threadIdx.y; i < word_count.value; ++i) {
+          shared_accum[i] = block_total[i];
+        }
+      }
+
+      // Call functor with exclusive scan value
+      if (doWork) {
+        this->template exec_range<WorkTag>(
+            iwork,
+            final_reducer.reference(
+                reinterpret_cast<pointer_type>(shared_prefix)),
+            true);
+      }
+    }
+  }
+
+ public:
+  //----------------------------------------
+
+  __device__ inline void operator()() const {
+    if (!m_final) {
+      initial();
+    } else {
+      final();
+    }
+  }
+
+  // Determine block size constrained by shared memory:
+  virtual inline unsigned local_block_size(const FunctorType& f) = 0;
+
+  inline void impl_execute() {
+    const index_type nwork = m_policy.end() - m_policy.begin();
+    if (nwork) {
+      // FIXME_HIP we cannot choose this larger: for large work sizes the
+      // unit tests fail with wrong results
+      const int gridMaxComputeCapability_2x = 0x01fff;
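+      // (0x01fff == 8191 blocks)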
+
+      const int block_size = static_cast<int>(local_block_size(m_functor));
+      if (block_size == 0) {
+        Kokkos::Impl::throw_runtime_exception(
+            std::string("Kokkos::Impl::ParallelScan< HIP > could not find a "
+                        "valid execution configuration."));
+      }
+
+      const int grid_max =
+          std::min(block_size * block_size, gridMaxComputeCapability_2x);
+
+      // At most 'max_grid' blocks:
+      const int max_grid =
+          std::min<int>(grid_max, (nwork + block_size - 1) / block_size);
+
+      // How much work per block:
+      const int work_per_block = (nwork + max_grid - 1) / max_grid;
+
+      // How many block are really needed for this much work:
+      m_grid_x = (nwork + work_per_block - 1) / work_per_block;
+
+      m_scratch_space = Kokkos::Experimental::Impl::hip_internal_scratch_space(
+          m_policy.space(), Analysis::value_size(m_functor) * m_grid_x);
+      m_scratch_flags = Kokkos::Experimental::Impl::hip_internal_scratch_flags(
+          m_policy.space(), sizeof(size_type) * 1);
+
+      dim3 grid(m_grid_x, 1, 1);
+      dim3 block(1, block_size, 1);  // REQUIRED DIMENSIONS ( 1 , N , 1 )
+      const int shmem = Analysis::value_size(m_functor) * (block_size + 2);
+
+      m_final = false;
+      // these ones are OK to be just the base because the specializations
+      // do not modify the kernel at all
+      using DriverType = ParallelScanHIPBase<FunctorType, Traits...>;
+      Kokkos::Experimental::Impl::hip_parallel_launch<DriverType, LaunchBounds>(
+          *this, grid, block, shmem,
+          m_policy.space().impl_internal_space_instance(),
+          false);  // copy to device and execute
+
+      m_final = true;
+      Kokkos::Experimental::Impl::hip_parallel_launch<DriverType, LaunchBounds>(
+          *this, grid, block, shmem,
+          m_policy.space().impl_internal_space_instance(),
+          false);  // copy to device and execute
+    }
+  }
+
+  ParallelScanHIPBase(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_shared_memory_lock(m_policy.space()
+                                 .impl_internal_space_instance()
+                                 ->m_mutexSharedMemory) {}
+};
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+                   Kokkos::Experimental::HIP>
+    : public ParallelScanHIPBase<FunctorType, Traits...> {
+ public:
+  using Base = ParallelScanHIPBase<FunctorType, Traits...>;
+  using Base::operator();
+
+  inline void execute() { Base::impl_execute(); }
+
+  ParallelScan(const FunctorType& arg_functor,
+               const typename Base::Policy& arg_policy)
+      : Base(arg_functor, arg_policy) {}
+
+  inline unsigned local_block_size(const FunctorType& f) {
+    // blockDim.y must be a power of two: 128 (2 warps), 256 (4 warps), or
+    // 512 (8 warps); gridDim.x <= blockDim.y * blockDim.y
+
+    const auto& instance =
+        Base::m_policy.space().impl_internal_space_instance();
+    auto shmem_functor = [&f](unsigned n) {
+      return hip_single_inter_block_reduce_scan_shmem<true, FunctorType,
+                                                      typename Base::WorkTag>(
+          f, n);
+    };
+    using DriverType = ParallelScan<FunctorType, typename Base::Policy,
+                                    Kokkos::Experimental::HIP>;
+    return Kokkos::Experimental::Impl::hip_get_preferred_blocksize<
+        DriverType, typename Base::LaunchBounds>(instance, shmem_functor);
+  }
+};
+
+//----------------------------------------------------------------------------
+
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+                            ReturnType, Kokkos::Experimental::HIP>
+    : public ParallelScanHIPBase<FunctorType, Traits...> {
+ public:
+  using Base = ParallelScanHIPBase<FunctorType, Traits...>;
+  using Base::operator();
+
+  ReturnType& m_returnvalue;
+
+  inline void execute() {
+    Base::impl_execute();
+
+    const auto nwork = Base::m_policy.end() - Base::m_policy.begin();
+    if (nwork) {
+      const int size = Base::Analysis::value_size(Base::m_functor);
+      DeepCopy<HostSpace, Kokkos::Experimental::HIPSpace,
+               Kokkos::Experimental::HIP>(
+          Base::m_policy.space(), &m_returnvalue,
+          Base::m_scratch_space + (Base::m_grid_x - 1) * size / sizeof(int),
+          size);
+    }
+  }
+
+  ParallelScanWithTotal(const FunctorType& arg_functor,
+                        const typename Base::Policy& arg_policy,
+                        ReturnType& arg_returnvalue)
+      : Base(arg_functor, arg_policy), m_returnvalue(arg_returnvalue) {}
+
+  inline unsigned local_block_size(const FunctorType& f) {
+    // blockDim.y must be a power of two: 128 (2 warps), 256 (4 warps), or
+    // 512 (8 warps); gridDim.x <= blockDim.y * blockDim.y
+
+    const auto& instance =
+        Base::m_policy.space().impl_internal_space_instance();
+    auto shmem_functor = [&f](unsigned n) {
+      return hip_single_inter_block_reduce_scan_shmem<true, FunctorType,
+                                                      typename Base::WorkTag>(
+          f, n);
+    };
+    using DriverType =
+        ParallelScanWithTotal<FunctorType, typename Base::Policy, ReturnType,
+                              Kokkos::Experimental::HIP>;
+    return Kokkos::Experimental::Impl::hip_get_preferred_blocksize<
+        DriverType, typename Base::LaunchBounds>(instance, shmem_functor);
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
+
+#endif
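For orientation, a minimal usage sketch of the two scan specializations above
(illustrative names only, not from the bundled sources); supplying a total
argument selects ParallelScanWithTotal, otherwise ParallelScan runs:

    #include <Kokkos_Core.hpp>

    // Exclusive prefix sum over `in`; the two-kernel initial()/final()
    // protocol above is internal to the backend.
    void prefix_sum(Kokkos::View<const int*> in, Kokkos::View<int*> out,
                    int& total) {
      Kokkos::parallel_scan(
          Kokkos::RangePolicy<Kokkos::Experimental::HIP>(0, in.extent_int(0)),
          KOKKOS_LAMBDA(int i, int& update, bool final_pass) {
            if (final_pass) out(i) = update;  // sum of entries before i
            update += in(i);
          },
          total);
    }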
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Parallel_Team.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Parallel_Team.hpp
new file mode 100644 (file)
index 0000000..69ced48
--- /dev/null
@@ -0,0 +1,1082 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_PARALLEL_TEAM_HPP
+#define KOKKOS_HIP_PARALLEL_TEAM_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#if defined(__HIPCC__)
+
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+#include <HIP/Kokkos_HIP_Locks.hpp>
+#include <HIP/Kokkos_HIP_Team.hpp>
+#include <HIP/Kokkos_HIP_Instance.hpp>
+#include <Kokkos_MinMaxClamp.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename... Properties>
+class TeamPolicyInternal<Kokkos::Experimental::HIP, Properties...>
+    : public PolicyTraits<Properties...> {
+ public:
+  using execution_policy = TeamPolicyInternal;
+
+  using traits    = PolicyTraits<Properties...>;
+  using BlockType = Kokkos::Experimental::Impl::BlockType;
+
+  template <typename ExecSpace, typename... OtherProperties>
+  friend class TeamPolicyInternal;
+
+ private:
+  typename traits::execution_space m_space;
+  int m_league_size;
+  int m_team_size;
+  int m_vector_length;
+  size_t m_team_scratch_size[2];
+  size_t m_thread_scratch_size[2];
+  int m_chunk_size;
+  bool m_tune_team_size;
+  bool m_tune_vector_length;
+
+ public:
+  using execution_space = Kokkos::Experimental::HIP;
+
+  template <class... OtherProperties>
+  TeamPolicyInternal(TeamPolicyInternal<OtherProperties...> const& p) {
+    m_league_size            = p.m_league_size;
+    m_team_size              = p.m_team_size;
+    m_vector_length          = p.m_vector_length;
+    m_team_scratch_size[0]   = p.m_team_scratch_size[0];
+    m_team_scratch_size[1]   = p.m_team_scratch_size[1];
+    m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
+    m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
+    m_chunk_size             = p.m_chunk_size;
+    m_space                  = p.m_space;
+    m_tune_team_size         = p.m_tune_team_size;
+    m_tune_vector_length     = p.m_tune_vector_length;
+  }
+
+  template <typename FunctorType>
+  int team_size_max(FunctorType const& f, ParallelForTag const&) const {
+    using closure_type =
+        Impl::ParallelFor<FunctorType, TeamPolicy<Properties...>>;
+
+    return internal_team_size_common<BlockType::Max, closure_type>(f);
+  }
+
+  template <class FunctorType>
+  inline int team_size_max(const FunctorType& f,
+                           const ParallelReduceTag&) const {
+    using functor_analysis_type =
+        Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                              TeamPolicyInternal, FunctorType>;
+    using reducer_type = typename Impl::ParallelReduceReturnValue<
+        void, typename functor_analysis_type::value_type,
+        FunctorType>::reducer_type;
+    using closure_type =
+        Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
+                             reducer_type>;
+    return internal_team_size_max<closure_type>(f);
+  }
+
+  template <typename FunctorType, typename ReducerType>
+  inline int team_size_max(const FunctorType& f, const ReducerType&,
+                           const ParallelReduceTag&) const {
+    using closure_type =
+        Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
+                             ReducerType>;
+    return internal_team_size_max<closure_type>(f);
+  }
+
+  template <typename FunctorType>
+  int team_size_recommended(FunctorType const& f, ParallelForTag const&) const {
+    using closure_type =
+        Impl::ParallelFor<FunctorType, TeamPolicy<Properties...>>;
+
+    return internal_team_size_common<BlockType::Preferred, closure_type>(f);
+  }
+
+  template <typename FunctorType>
+  inline int team_size_recommended(FunctorType const& f,
+                                   ParallelReduceTag const&) const {
+    using functor_analysis_type =
+        Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                              TeamPolicyInternal, FunctorType>;
+    using reducer_type = typename Impl::ParallelReduceReturnValue<
+        void, typename functor_analysis_type::value_type,
+        FunctorType>::reducer_type;
+    using closure_type =
+        Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
+                             reducer_type>;
+    return internal_team_size_recommended<closure_type>(f);
+  }
+
+  template <typename FunctorType, typename ReducerType>
+  int team_size_recommended(FunctorType const& f, ReducerType const&,
+                            ParallelReduceTag const&) const {
+    using closure_type =
+        Impl::ParallelReduce<FunctorType, TeamPolicy<Properties...>,
+                             ReducerType>;
+    return internal_team_size_recommended<closure_type>(f);
+  }
+
+  inline bool impl_auto_vector_length() const { return m_tune_vector_length; }
+  inline bool impl_auto_team_size() const { return m_tune_team_size; }
+  static int vector_length_max() {
+    return ::Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+  }
+
+  static int verify_requested_vector_length(int requested_vector_length) {
+    int test_vector_length =
+        std::min(requested_vector_length, vector_length_max());
+
+    // Allow only power-of-two vector_length
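+    // (e.g. a request of 6 rounds down to 4 and a request of 48 rounds down
+    //  to 32; the result never exceeds HIPTraits::WarpSize)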
+    if (!(is_integral_power_of_two(test_vector_length))) {
+      int test_pow2           = 1;
+      int constexpr warp_size = Experimental::Impl::HIPTraits::WarpSize;
+      while (test_pow2 < warp_size) {
+        test_pow2 <<= 1;
+        if (test_pow2 > test_vector_length) {
+          break;
+        }
+      }
+      test_vector_length = test_pow2 >> 1;
+    }
+
+    return test_vector_length;
+  }
+
+  static int scratch_size_max(int level) {
+    return (
+        level == 0 ? 1024 * 40 :  // FIXME_HIP arbitrarily setting this to 40kB
+            20 * 1024 * 1024);    // FIXME_HIP arbitrarily setting this to 20MB
+  }
+  inline void impl_set_vector_length(size_t size) { m_vector_length = size; }
+  inline void impl_set_team_size(size_t size) { m_team_size = size; }
+  int impl_vector_length() const { return m_vector_length; }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  KOKKOS_DEPRECATED int vector_length() const { return impl_vector_length(); }
+#endif
+
+  int team_size() const { return m_team_size; }
+
+  int league_size() const { return m_league_size; }
+
+  size_t scratch_size(int level, int team_size_ = -1) const {
+    if (team_size_ < 0) team_size_ = m_team_size;
+    return m_team_scratch_size[level] +
+           team_size_ * m_thread_scratch_size[level];
+  }
+
+  size_t team_scratch_size(int level) const {
+    return m_team_scratch_size[level];
+  }
+
+  size_t thread_scratch_size(int level) const {
+    return m_thread_scratch_size[level];
+  }
+
+  typename traits::execution_space space() const { return m_space; }
+
+  TeamPolicyInternal()
+      : m_space(typename traits::execution_space()),
+        m_league_size(0),
+        m_team_size(-1),
+        m_vector_length(0),
+        m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(::Kokkos::Experimental::Impl::HIPTraits::WarpSize),
+        m_tune_team_size(false),
+        m_tune_vector_length(false) {}
+
+  /** \brief  Specify league size, request team size */
+  TeamPolicyInternal(const execution_space space_, int league_size_,
+                     int team_size_request, int vector_length_request = 1)
+      : m_space(space_),
+        m_league_size(league_size_),
+        m_team_size(team_size_request),
+        m_vector_length(
+            (vector_length_request > 0)
+                ? verify_requested_vector_length(vector_length_request)
+                : (verify_requested_vector_length(1))),
+        m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(::Kokkos::Experimental::Impl::HIPTraits::WarpSize),
+        m_tune_team_size(bool(team_size_request <= 0)),
+        m_tune_vector_length(bool(vector_length_request <= 0)) {
+    // Make sure league size is permissible
+    if (league_size_ >=
+        static_cast<int>(
+            ::Kokkos::Experimental::Impl::hip_internal_maximum_grid_count()[0]))
+      Impl::throw_runtime_exception(
+          "Requested too large league_size for TeamPolicy on HIP execution "
+          "space.");
+
+    // Make sure total block size is permissible
+    if (m_team_size * m_vector_length >
+        ::Kokkos::Experimental::Impl::HIPTraits::MaxThreadsPerBlock) {
+      Impl::throw_runtime_exception(
+          std::string("Kokkos::TeamPolicy< HIP > the team size is too large. "
+                      "Team size x vector length must be smaller than 1024."));
+    }
+  }
+
+  /** \brief  Specify league size, request team size */
+  TeamPolicyInternal(const execution_space space_, int league_size_,
+                     const Kokkos::AUTO_t& /* team_size_request */,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(space_, league_size_, -1, vector_length_request) {}
+  /** \brief  Specify league size and team size, request vector length */
+  TeamPolicyInternal(const execution_space space_, int league_size_,
+                     int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(space_, league_size_, team_size_request, -1) {}
+
+  /** \brief  Specify league size, request team size and vector length */
+  TeamPolicyInternal(const execution_space space_, int league_size_,
+                     const Kokkos::AUTO_t& /* team_size_request */,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(space_, league_size_, -1, -1) {}
+
+  TeamPolicyInternal(int league_size_, int team_size_request,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+                           team_size_request, vector_length_request) {}
+
+  TeamPolicyInternal(int league_size_,
+                     const Kokkos::AUTO_t& /* team_size_request */,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_, -1,
+                           vector_length_request) {}
+
+  /** \brief  Specify league size and team size, request vector length */
+  TeamPolicyInternal(int league_size_, int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+                           team_size_request, -1) {}
+
+  /** \brief  Specify league size, request team size and vector length */
+  TeamPolicyInternal(int league_size_,
+                     const Kokkos::AUTO_t& /* team_size_request */,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_, -1,
+                           -1) {}
+
+  int chunk_size() const { return m_chunk_size; }
+
+  TeamPolicyInternal& set_chunk_size(typename traits::index_type chunk_size_) {
+    m_chunk_size = chunk_size_;
+    return *this;
+  }
+
+  /** \brief set per team scratch size for a specific level of the scratch
+   * hierarchy */
+  TeamPolicyInternal& set_scratch_size(int level,
+                                       PerTeamValue const& per_team) {
+    m_team_scratch_size[level] = per_team.value;
+    return *this;
+  }
+
+  /** \brief set per thread scratch size for a specific level of the scratch
+   * hierarchy */
+  TeamPolicyInternal& set_scratch_size(int level,
+                                       PerThreadValue const& per_thread) {
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+  /** \brief set per thread and per team scratch size for a specific level of
+   * the scratch hierarchy */
+  TeamPolicyInternal& set_scratch_size(int level, PerTeamValue const& per_team,
+                                       PerThreadValue const& per_thread) {
+    m_team_scratch_size[level]   = per_team.value;
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
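+
+  /* Illustrative usage of the setters above (a sketch, not part of this
+   * header): user code typically writes
+   *
+   *   policy.set_scratch_size(0, Kokkos::PerTeam(1024),
+   *                           Kokkos::PerThread(64));
+   *
+   * which lands in the overload above with per_team.value == 1024 and
+   * per_thread.value == 64 at scratch level 0. */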
+
+  using member_type = Kokkos::Impl::HIPTeamMember;
+
+ protected:
+  template <BlockType BlockSize, class ClosureType, class FunctorType>
+  int internal_team_size_common(const FunctorType& f) const {
+    // FIXME_HIP: this could be unified with internal_team_size_common_reduce
+    // once we can rely on C++17 'if constexpr' by default. The problem right
+    // now is that we cannot turn off the evaluation of the Analysis'
+    // value_size / StaticValueSize.
+
+    const unsigned shmem_block  = team_scratch_size(0) + 2 * sizeof(double);
+    const unsigned shmem_thread = thread_scratch_size(0) + sizeof(double);
+    const int vector_length     = impl_vector_length();
+
+    const auto functor = [&f, shmem_block, shmem_thread, vector_length](
+                             const hipFuncAttributes& attr, int block_size) {
+      int functor_shmem =
+          ::Kokkos::Impl::FunctorTeamShmemSize<FunctorType>::value(
+              f, block_size / vector_length);
+      return shmem_block + shmem_thread * (block_size / vector_length) +
+             functor_shmem + attr.sharedSizeBytes;
+    };
+    int block_size;
+    // FIXME_HIP - could be 'if constexpr' with C++17
+    if (BlockSize == BlockType::Max) {
+      block_size = ::Kokkos::Experimental::Impl::hip_get_max_team_blocksize<
+          ClosureType, typename traits::launch_bounds>(
+          space().impl_internal_space_instance(), functor);
+    } else {
+      block_size =
+          ::Kokkos::Experimental::Impl::hip_get_preferred_team_blocksize<
+              ClosureType, typename traits::launch_bounds>(
+              space().impl_internal_space_instance(), functor);
+    }
+    if (block_size == 0) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelFor< HIP > could not find a valid "
+                      "team size."));
+    }
+    return block_size / impl_vector_length();
+  }
+
+  template <BlockType BlockSize, class ClosureType, class FunctorType>
+  int internal_team_size_common_reduce(const FunctorType& f) const {
+    using Interface =
+        typename Impl::DeduceFunctorPatternInterface<ClosureType>::type;
+    using Analysis =
+        Impl::FunctorAnalysis<Interface, typename ClosureType::Policy,
+                              FunctorType>;
+
+    const unsigned shmem_block = team_scratch_size(0) + 2 * sizeof(double);
+    const unsigned shmem_thread =
+        thread_scratch_size(0) + sizeof(double) +
+        ((Analysis::StaticValueSize != 0) ? 0 : Analysis::value_size(f));
+    const int vector_length = impl_vector_length();
+
+    const auto functor = [&f, shmem_block, shmem_thread, vector_length](
+                             const hipFuncAttributes& attr, int block_size) {
+      int functor_shmem =
+          ::Kokkos::Impl::FunctorTeamShmemSize<FunctorType>::value(
+              f, block_size / vector_length);
+      return shmem_block + shmem_thread * (block_size / vector_length) +
+             functor_shmem + attr.sharedSizeBytes;
+    };
+    int block_size;
+    // FIXME_HIP - could be 'if constexpr' with C++17
+    if (BlockSize == BlockType::Max) {
+      block_size = ::Kokkos::Experimental::Impl::hip_get_max_team_blocksize<
+          ClosureType, typename traits::launch_bounds>(
+          space().impl_internal_space_instance(), functor);
+    } else {
+      block_size =
+          ::Kokkos::Experimental::Impl::hip_get_preferred_team_blocksize<
+              ClosureType, typename traits::launch_bounds>(
+              space().impl_internal_space_instance(), functor);
+    }
+
+    if (block_size == 0) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< HIP > could not find a "
+                      "valid team size."));
+    }
+    // Currently we require a power-of-two team size for reductions.
+    int p2 = 1;
+    while (p2 <= block_size) p2 *= 2;
+    p2 /= 2;
+    return p2 / impl_vector_length();
+  }
+
+  template <class ClosureType, class FunctorType>
+  int internal_team_size_max(const FunctorType& f) const {
+    return internal_team_size_common_reduce<BlockType::Max, ClosureType>(f);
+  }
+
+  template <class ClosureType, class FunctorType>
+  int internal_team_size_recommended(const FunctorType& f) const {
+    return internal_team_size_common_reduce<BlockType::Preferred, ClosureType>(
+        f);
+  }
+};
+
+__device__ inline int64_t hip_get_scratch_index(
+    Experimental::HIP::size_type league_size, int32_t* scratch_locks) {
+  int64_t threadid = 0;
+  __shared__ int64_t base_thread_id;
+  if (threadIdx.x == 0 && threadIdx.y == 0) {
+    int64_t const wraparound_len =
+        Kokkos::min(int64_t(league_size),
+                    (int64_t(Kokkos::Impl::g_device_hip_lock_arrays.n)) /
+                        (blockDim.x * blockDim.y));
+    threadid = (blockIdx.x * blockDim.z + threadIdx.z) % wraparound_len;
+    threadid *= blockDim.x * blockDim.y;
+    int done = 0;
+    while (!done) {
+      done = (0 == atomicCAS(&scratch_locks[threadid], 0, 1));
+      if (!done) {
+        threadid += blockDim.x * blockDim.y;
+        if (int64_t(threadid + blockDim.x * blockDim.y) >=
+            wraparound_len * blockDim.x * blockDim.y)
+          threadid = 0;
+      }
+    }
+    base_thread_id = threadid;
+  }
+  __syncthreads();
+  threadid = base_thread_id;
+  return threadid;
+}
+
+__device__ inline void hip_release_scratch_index(int32_t* scratch_locks,
+                                                 int64_t threadid) {
+  __syncthreads();
+  if (threadIdx.x == 0 && threadIdx.y == 0) {
+    scratch_locks[threadid] = 0;
+  }
+}
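+
+// Illustrative usage of the acquire/release pair above inside a team kernel
+// with level-1 scratch (a sketch mirroring ParallelFor::operator() below):
+//
+//   int64_t tid = 0;
+//   if (m_scratch_size[1] > 0)
+//     tid = hip_get_scratch_index(m_league_size, m_scratch_locks);
+//   // ... address the level-1 scratch slice selected by 'tid' ...
+//   if (m_scratch_size[1] > 0)
+//     hip_release_scratch_index(m_scratch_locks, tid);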
+
+template <typename FunctorType, typename... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                  Kokkos::Experimental::HIP> {
+ public:
+  using Policy = TeamPolicyInternal<Kokkos::Experimental::HIP, Properties...>;
+  using functor_type = FunctorType;
+  using size_type    = ::Kokkos::Experimental::HIP::size_type;
+
+ private:
+  using member_type   = typename Policy::member_type;
+  using work_tag      = typename Policy::work_tag;
+  using launch_bounds = typename Policy::launch_bounds;
+
+  // Algorithmic constraints: blockDim.y is a power of two AND
+  // blockDim.z == 1. Shared memory utilization:
+  //
+  //  [ team   reduce space ]
+  //  [ team   shared space ]
+
+  FunctorType const m_functor;
+  Policy const m_policy;
+  size_type const m_league_size;
+  int m_team_size;
+  size_type const m_vector_size;
+  int m_shmem_begin;
+  int m_shmem_size;
+  void* m_scratch_ptr[2];
+  size_t m_scratch_size[2];
+  int32_t* m_scratch_locks;
+  // Only let one ParallelFor/Reduce modify the team scratch memory. The
+  // constructor acquires the mutex which is released in the destructor.
+  std::lock_guard<std::mutex> m_scratch_lock_guard;
+
+  template <typename TagType>
+  __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_team(
+      const member_type& member) const {
+    m_functor(member);
+  }
+
+  template <typename TagType>
+  __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_team(
+      const member_type& member) const {
+    m_functor(TagType(), member);
+  }
+
+ public:
+  __device__ inline void operator()() const {
+    // Iterate this block through the league
+    int64_t threadid = 0;
+    if (m_scratch_size[1] > 0) {
+      threadid = hip_get_scratch_index(m_league_size, m_scratch_locks);
+    }
+
+    int const int_league_size = static_cast<int>(m_league_size);
+    for (int league_rank = blockIdx.x; league_rank < int_league_size;
+         league_rank += gridDim.x) {
+      this->template exec_team<work_tag>(typename Policy::member_type(
+          ::Kokkos::Experimental::kokkos_impl_hip_shared_memory<void>(),
+          m_shmem_begin, m_shmem_size,
+          static_cast<void*>(static_cast<char*>(m_scratch_ptr[1]) +
+                             ptrdiff_t(threadid / (blockDim.x * blockDim.y)) *
+                                 m_scratch_size[1]),
+          m_scratch_size[1], league_rank, m_league_size));
+    }
+    if (m_scratch_size[1] > 0) {
+      hip_release_scratch_index(m_scratch_locks, threadid);
+    }
+  }
+
+  inline void execute() const {
+    int64_t const shmem_size_total = m_shmem_begin + m_shmem_size;
+    dim3 const grid(static_cast<int>(m_league_size), 1, 1);
+    dim3 const block(static_cast<int>(m_vector_size),
+                     static_cast<int>(m_team_size), 1);
+
+    using closure_type =
+        ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                    Kokkos::Experimental::HIP>;
+    ::Kokkos::Experimental::Impl::hip_parallel_launch<closure_type,
+                                                      launch_bounds>(
+        *this, grid, block, shmem_size_total,
+        m_policy.space().impl_internal_space_instance(),
+        true);  // copy to device and execute
+  }
+
+  ParallelFor(FunctorType const& arg_functor, Policy const& arg_policy)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_league_size(arg_policy.league_size()),
+        m_team_size(arg_policy.team_size()),
+        m_vector_size(arg_policy.impl_vector_length()),
+        m_scratch_lock_guard(m_policy.space()
+                                 .impl_internal_space_instance()
+                                 ->m_team_scratch_mutex) {
+    m_team_size = m_team_size >= 0 ? m_team_size
+                                   : arg_policy.team_size_recommended(
+                                         arg_functor, ParallelForTag());
+
+    m_shmem_begin = (sizeof(double) * (m_team_size + 2));
+    m_shmem_size =
+        (m_policy.scratch_size(0, m_team_size) +
+         FunctorTeamShmemSize<FunctorType>::value(m_functor, m_team_size));
+    m_scratch_size[0] = m_policy.scratch_size(0, m_team_size);
+    m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+    m_scratch_locks =
+        m_policy.space().impl_internal_space_instance()->m_scratch_locks;
+
+    // Functor's reduce memory, team scan memory, and team shared memory depend
+    // upon team size.
+    m_scratch_ptr[0] = nullptr;
+    m_scratch_ptr[1] =
+        m_team_size <= 0
+            ? nullptr
+            : m_policy.space()
+                  .impl_internal_space_instance()
+                  ->resize_team_scratch_space(
+                      static_cast<std::int64_t>(m_scratch_size[1]) *
+                      (std::min(static_cast<std::int64_t>(
+                                    Kokkos::Experimental::HIP::concurrency() /
+                                    (m_team_size * m_vector_size)),
+                                static_cast<std::int64_t>(m_league_size))));
+
+    int const shmem_size_total = m_shmem_begin + m_shmem_size;
+    if (m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
+        shmem_size_total) {
+      Kokkos::Impl::throw_runtime_exception(std::string(
+          "Kokkos::Impl::ParallelFor< HIP > insufficient shared memory"));
+    }
+
+    size_t max_size = arg_policy.team_size_max(arg_functor, ParallelForTag());
+    if (static_cast<int>(m_team_size) > static_cast<int>(max_size)) {
+      Kokkos::Impl::throw_runtime_exception(std::string(
+          "Kokkos::Impl::ParallelFor< HIP > requested too large team size."));
+    }
+  }
+};
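+
+/* Illustrative sketch of the user-facing pattern this specialization backs
+ * (the team count 'N' and the lambda body are hypothetical):
+ *
+ *   using policy_t = Kokkos::TeamPolicy<Kokkos::Experimental::HIP>;
+ *   Kokkos::parallel_for(
+ *       policy_t(N, Kokkos::AUTO),
+ *       KOKKOS_LAMBDA(policy_t::member_type const& team) {
+ *         // use team.league_rank(), team.team_rank(), team scratch, ...
+ *       });
+ */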
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class FunctorType, class ReducerType, class... Properties>
+class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                     ReducerType, Kokkos::Experimental::HIP> {
+ public:
+  using Policy = TeamPolicyInternal<Kokkos::Experimental::HIP, Properties...>;
+
+ private:
+  using member_type   = typename Policy::member_type;
+  using work_tag      = typename Policy::work_tag;
+  using launch_bounds = typename Policy::launch_bounds;
+
+  using reducer_conditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using reducer_type_fwd = typename reducer_conditional::type;
+  using work_tag_fwd =
+      typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                                  work_tag, void>::type;
+
+  using analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                                         Policy, reducer_type_fwd>;
+
+  using pointer_type   = typename analysis::pointer_type;
+  using reference_type = typename analysis::reference_type;
+  using value_type     = typename analysis::value_type;
+
+ public:
+  using functor_type = FunctorType;
+  using size_type    = Kokkos::Experimental::HIP::size_type;
+
+  static int constexpr UseShflReduction = (analysis::StaticValueSize != 0);
+
+ private:
+  struct ShflReductionTag {};
+  struct SHMEMReductionTag {};
+
+  // Algorithmic constraints: blockDim.y is a power of two AND
+  // blockDim.z == 1. Shared memory utilization:
+  //
+  //  [ global reduce space ]
+  //  [ team   reduce space ]
+  //  [ team   shared space ]
+  //
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  const bool m_result_ptr_device_accessible;
+  const bool m_result_ptr_host_accessible;
+  size_type* m_scratch_space;
+  size_type* m_scratch_flags;
+  size_type m_team_begin;
+  size_type m_shmem_begin;
+  size_type m_shmem_size;
+  void* m_scratch_ptr[2];
+  size_t m_scratch_size[2];
+  int32_t* m_scratch_locks;
+  const size_type m_league_size;
+  int m_team_size;
+  const size_type m_vector_size;
+  // Only let one ParallelFor/Reduce modify the team scratch memory. The
+  // constructor acquires the mutex which is released in the destructor.
+  std::lock_guard<std::mutex> m_scratch_lock_guard;
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_team(
+      member_type const& member, reference_type update) const {
+    m_functor(member, update);
+  }
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_team(
+      member_type const& member, reference_type update) const {
+    m_functor(TagType(), member, update);
+  }
+
+  __device__ inline void iterate_through_league(int const threadid,
+                                                reference_type value) const {
+    int const int_league_size = static_cast<int>(m_league_size);
+    for (int league_rank = blockIdx.x; league_rank < int_league_size;
+         league_rank += gridDim.x) {
+      this->template exec_team<work_tag>(
+          member_type(
+              Kokkos::Experimental::kokkos_impl_hip_shared_memory<char>() +
+                  m_team_begin,
+              m_shmem_begin, m_shmem_size,
+              reinterpret_cast<void*>(
+                  reinterpret_cast<char*>(m_scratch_ptr[1]) +
+                  static_cast<ptrdiff_t>(threadid / (blockDim.x * blockDim.y)) *
+                      m_scratch_size[1]),
+              m_scratch_size[1], league_rank, m_league_size),
+          value);
+    }
+  }
+
+ public:
+  __device__ inline void operator()() const {
+    int64_t threadid = 0;
+    if (m_scratch_size[1] > 0) {
+      threadid = hip_get_scratch_index(m_league_size, m_scratch_locks);
+    }
+
+    using ReductionTag = std::conditional_t<UseShflReduction, ShflReductionTag,
+                                            SHMEMReductionTag>;
+    run(ReductionTag{}, threadid);
+
+    if (m_scratch_size[1] > 0) {
+      hip_release_scratch_index(m_scratch_locks, threadid);
+    }
+  }
+
+  __device__ inline void run(SHMEMReductionTag, int const threadid) const {
+    typename analysis::Reducer final_reducer(
+        &reducer_conditional::select(m_functor, m_reducer));
+
+    integral_nonzero_constant<size_type, analysis::StaticValueSize /
+                                             sizeof(size_type)> const
+        word_count(analysis::value_size(
+                       reducer_conditional::select(m_functor, m_reducer)) /
+                   sizeof(size_type));
+
+    reference_type value = final_reducer.init(
+        Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>() +
+        threadIdx.y * word_count.value);
+
+    // Iterate this block through the league
+    iterate_through_league(threadid, value);
+
+    // Reduce with final value at blockDim.y - 1 location.
+    bool do_final_reduce = (m_league_size == 0);
+    if (!do_final_reduce)
+      do_final_reduce =
+          hip_single_inter_block_reduce_scan<false, FunctorType, work_tag>(
+              reducer_conditional::select(m_functor, m_reducer), blockIdx.x,
+              gridDim.x,
+              Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>(),
+              m_scratch_space, m_scratch_flags);
+    if (do_final_reduce) {
+      // This is the final block, with the final result at the final thread's
+      // location
+
+      size_type* const shared =
+          Kokkos::Experimental::kokkos_impl_hip_shared_memory<size_type>() +
+          (blockDim.y - 1) * word_count.value;
+      size_type* const global = m_result_ptr_device_accessible
+                                    ? reinterpret_cast<size_type*>(m_result_ptr)
+                                    : m_scratch_space;
+
+      if (threadIdx.y == 0) {
+        final_reducer.final(reinterpret_cast<value_type*>(shared));
+      }
+
+      if (Kokkos::Experimental::Impl::HIPTraits::WarpSize < word_count.value) {
+        __syncthreads();
+      }
+
+      for (unsigned i = threadIdx.y; i < word_count.value; i += blockDim.y) {
+        global[i] = shared[i];
+      }
+    }
+  }
+
+  __device__ inline void run(ShflReductionTag, int const threadid) const {
+    typename analysis::Reducer final_reducer(
+        &reducer_conditional::select(m_functor, m_reducer));
+
+    value_type value;
+    final_reducer.init(&value);
+
+    // Iterate this block through the league
+    iterate_through_league(threadid, value);
+
+    pointer_type const result =
+        m_result_ptr_device_accessible
+            ? m_result_ptr
+            : reinterpret_cast<pointer_type>(m_scratch_space);
+
+    value_type init;
+    final_reducer.init(&init);
+    if (m_league_size == 0) {
+      final_reducer.final(&value);
+      *result = value;
+    } else if (Impl::hip_inter_block_shuffle_reduction(
+                   value, init, final_reducer, m_scratch_space, result,
+                   m_scratch_flags, blockDim.y)) {
+      unsigned int const id = threadIdx.y * blockDim.x + threadIdx.x;
+      if (id == 0) {
+        final_reducer.final(&value);
+        *result = value;
+      }
+    }
+  }
+
+  inline void execute() {
+    typename analysis::Reducer final_reducer(
+        &reducer_conditional::select(m_functor, m_reducer));
+
+    const bool is_empty_range  = m_league_size == 0 || m_team_size == 0;
+    const bool need_device_set = analysis::has_init_member_function ||
+                                 analysis::has_final_member_function ||
+                                 !m_result_ptr_host_accessible ||
+                                 !std::is_same<ReducerType, InvalidType>::value;
+    if (!is_empty_range || need_device_set) {
+      const int block_count =
+          UseShflReduction
+              ? std::min(
+                    m_league_size,
+                    size_type(1024 *
+                              Kokkos::Experimental::Impl::HIPTraits::WarpSize))
+              : std::min(static_cast<int>(m_league_size), m_team_size);
+
+      m_scratch_space = Kokkos::Experimental::Impl::hip_internal_scratch_space(
+          m_policy.space(), analysis::value_size(reducer_conditional::select(
+                                m_functor, m_reducer)) *
+                                block_count);
+      m_scratch_flags = Kokkos::Experimental::Impl::hip_internal_scratch_flags(
+          m_policy.space(), sizeof(size_type));
+
+      dim3 block(m_vector_size, m_team_size, 1);
+      dim3 grid(block_count, 1, 1);
+      if (is_empty_range) {
+        block = dim3(1, 1, 1);
+        grid  = dim3(1, 1, 1);
+      }
+      const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size;
+
+      using closure_type =
+          ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                         ReducerType, Kokkos::Experimental::HIP>;
+      Kokkos::Experimental::Impl::hip_parallel_launch<closure_type,
+                                                      launch_bounds>(
+          *this, grid, block, shmem_size_total,
+          m_policy.space().impl_internal_space_instance(),
+          true);  // copy to device and execute
+
+      if (!m_result_ptr_device_accessible) {
+        m_policy.space().impl_internal_space_instance()->fence();
+
+        if (m_result_ptr) {
+          const int size = analysis::value_size(
+              reducer_conditional::select(m_functor, m_reducer));
+          DeepCopy<HostSpace, Kokkos::Experimental::HIPSpace>(
+              m_result_ptr, m_scratch_space, size);
+        }
+      }
+    } else {
+      if (m_result_ptr) {
+        final_reducer.init(m_result_ptr);
+      }
+    }
+  }
+
+  template <class ViewType>
+  ParallelReduce(
+      FunctorType const& arg_functor, Policy const& arg_policy,
+      ViewType const& arg_result,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result.data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_result_ptr_host_accessible(
+            MemorySpaceAccess<Kokkos::HostSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_scratch_space(nullptr),
+        m_scratch_flags(nullptr),
+        m_team_begin(0),
+        m_shmem_begin(0),
+        m_shmem_size(0),
+        m_scratch_ptr{nullptr, nullptr},
+        m_league_size(arg_policy.league_size()),
+        m_team_size(arg_policy.team_size()),
+        m_vector_size(arg_policy.impl_vector_length()),
+        m_scratch_lock_guard(m_policy.space()
+                                 .impl_internal_space_instance()
+                                 ->m_team_scratch_mutex) {
+    m_team_size = m_team_size >= 0 ? m_team_size
+                                   : arg_policy.team_size_recommended(
+                                         arg_functor, ParallelReduceTag());
+
+    m_team_begin =
+        UseShflReduction
+            ? 0
+            : hip_single_inter_block_reduce_scan_shmem<false, FunctorType,
+                                                       work_tag>(arg_functor,
+                                                                 m_team_size);
+    m_shmem_begin = sizeof(double) * (m_team_size + 2);
+    m_shmem_size =
+        m_policy.scratch_size(0, m_team_size) +
+        FunctorTeamShmemSize<FunctorType>::value(arg_functor, m_team_size);
+    m_scratch_size[0] = m_shmem_size;
+    m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+    m_scratch_locks =
+        m_policy.space().impl_internal_space_instance()->m_scratch_locks;
+    m_scratch_ptr[1] =
+        m_team_size <= 0
+            ? nullptr
+            : m_policy.space()
+                  .impl_internal_space_instance()
+                  ->resize_team_scratch_space(
+                      static_cast<std::int64_t>(m_scratch_size[1]) *
+                      (std::min(static_cast<std::int64_t>(
+                                    Kokkos::Experimental::HIP::concurrency() /
+                                    (m_team_size * m_vector_size)),
+                                static_cast<std::int64_t>(m_league_size))));
+
+    // The global parallel_reduce does not support a vector_length other than 1
+    // at the moment
+    if ((arg_policy.impl_vector_length() > 1) && !UseShflReduction)
+      Impl::throw_runtime_exception(
+          "Kokkos::parallel_reduce with a TeamPolicy using a vector length of "
+          "greater than 1 is not currently supported for HIP for dynamic "
+          "sized reduction types.");
+
+    if ((m_team_size < Kokkos::Experimental::Impl::HIPTraits::WarpSize) &&
+        !UseShflReduction)
+      Impl::throw_runtime_exception(
+          "Kokkos::parallel_reduce with a TeamPolicy using a team_size smaller "
+          "than 64 is not currently supported with HIP for dynamic sized "
+          "reduction types.");
+
+    // Functor's reduce memory, team scan memory, and team shared memory depend
+    // upon team size.
+
+    const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size;
+
+    if (!Kokkos::Impl::is_integral_power_of_two(m_team_size) &&
+        !UseShflReduction) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< HIP > bad team size"));
+    }
+
+    if (m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
+        shmem_size_total) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< HIP > requested too much "
+                      "L0 scratch memory"));
+    }
+
+    size_t max_size =
+        arg_policy.team_size_max(arg_functor, ParallelReduceTag());
+    if (static_cast<int>(m_team_size) > static_cast<int>(max_size)) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< HIP > requested too "
+                      "large team size."));
+    }
+  }
+
+  ParallelReduce(FunctorType const& arg_functor, Policy const& arg_policy,
+                 ReducerType const& reducer)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_result_ptr_host_accessible(
+            MemorySpaceAccess<Kokkos::HostSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_scratch_space(nullptr),
+        m_scratch_flags(nullptr),
+        m_team_begin(0),
+        m_shmem_begin(0),
+        m_shmem_size(0),
+        m_scratch_ptr{nullptr, nullptr},
+        m_league_size(arg_policy.league_size()),
+        m_team_size(arg_policy.team_size()),
+        m_vector_size(arg_policy.impl_vector_length()),
+        m_scratch_lock_guard(m_policy.space()
+                                 .impl_internal_space_instance()
+                                 ->m_team_scratch_mutex) {
+    m_team_size = m_team_size >= 0
+                      ? m_team_size
+                      : arg_policy.team_size_recommended(arg_functor, reducer,
+                                                         ParallelReduceTag());
+    m_team_begin =
+        UseShflReduction
+            ? 0
+            : hip_single_inter_block_reduce_scan_shmem<false, FunctorType,
+                                                       work_tag>(arg_functor,
+                                                                 m_team_size);
+    m_shmem_begin = sizeof(double) * (m_team_size + 2);
+    m_shmem_size =
+        m_policy.scratch_size(0, m_team_size) +
+        FunctorTeamShmemSize<FunctorType>::value(arg_functor, m_team_size);
+    m_scratch_size[0] = m_shmem_size;
+    m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+    m_scratch_locks =
+        m_policy.space().impl_internal_space_instance()->m_scratch_locks;
+    m_scratch_ptr[1] =
+        m_team_size <= 0
+            ? nullptr
+            : m_policy.space()
+                  .impl_internal_space_instance()
+                  ->resize_team_scratch_space(
+                      static_cast<std::int64_t>(m_scratch_size[1]) *
+                      (std::min(static_cast<std::int64_t>(
+                                    Kokkos::Experimental::HIP::concurrency() /
+                                    (m_team_size * m_vector_size)),
+                                static_cast<std::int64_t>(m_league_size))));
+
+    // The global parallel_reduce does not support a vector_length other than 1
+    // at the moment
+    if ((arg_policy.impl_vector_length() > 1) && !UseShflReduction)
+      Impl::throw_runtime_exception(
+          "Kokkos::parallel_reduce with a TeamPolicy using a vector length of "
+          "greater than 1 is not currently supported for HIP for dynamic "
+          "sized reduction types.");
+
+    if ((m_team_size < Kokkos::Experimental::Impl::HIPTraits::WarpSize) &&
+        !UseShflReduction)
+      Impl::throw_runtime_exception(
+          "Kokkos::parallel_reduce with a TeamPolicy using a team_size smaller "
+          "than 64 is not currently supported with HIP for dynamic sized "
+          "reduction types.");
+
+    // Functor's reduce memory, team scan memory, and team shared memory depend
+    // upon team size.
+
+    const int shmem_size_total = m_team_begin + m_shmem_begin + m_shmem_size;
+    if ((!Kokkos::Impl::is_integral_power_of_two(m_team_size) &&
+         !UseShflReduction) ||
+        m_policy.space().impl_internal_space_instance()->m_maxShmemPerBlock <
+            shmem_size_total) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< HIP > bad team size"));
+    }
+
+    size_t max_size =
+        arg_policy.team_size_max(arg_functor, reducer, ParallelReduceTag());
+    if (static_cast<int>(m_team_size) > static_cast<int>(max_size)) {
+      Kokkos::Impl::throw_runtime_exception(
+          std::string("Kokkos::Impl::ParallelReduce< HIP > requested too "
+                      "large team size."));
+    }
+  }
+};
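+
+/* Illustrative sketch of the corresponding user-facing reduction (the team
+ * count 'N' and the per-team contribution are hypothetical):
+ *
+ *   using policy_t = Kokkos::TeamPolicy<Kokkos::Experimental::HIP>;
+ *   double total = 0.;
+ *   Kokkos::parallel_reduce(
+ *       policy_t(N, Kokkos::AUTO),
+ *       KOKKOS_LAMBDA(policy_t::member_type const& team, double& update) {
+ *         update += 1.0;  // hypothetical per-team contribution
+ *       },
+ *       total);
+ */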
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_ReduceScan.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_ReduceScan.hpp
new file mode 100644 (file)
index 0000000..1091ad5
--- /dev/null
@@ -0,0 +1,575 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_REDUCESCAN_HPP
+#define KOKKOS_HIP_REDUCESCAN_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#if defined(__HIPCC__)
+
+#include <HIP/Kokkos_HIP_Vectorization.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+// Reduction-only implementation
+//----------------------------------------------------------------------------
+
+template <class FunctorType, bool UseShfl>
+struct HIPReductionsFunctor;
+
+template <typename FunctorType>
+struct HIPReductionsFunctor<FunctorType, true> {
+  using pointer_type = typename FunctorType::pointer_type;
+  using Scalar       = typename FunctorType::value_type;
+
+  __device__ static inline void scalar_intra_warp_reduction(
+      FunctorType const& functor,
+      Scalar value,            // Contribution
+      bool const skip_vector,  // Skip threads if Kokkos vector lanes are not
+                               // part of the reduction
+      int const width,         // How much of the warp participates
+      Scalar& result) {
+    for (int delta = skip_vector ? blockDim.x : 1; delta < width; delta *= 2) {
+      Scalar tmp = Kokkos::Experimental::shfl_down(value, delta, width);
+      functor.join(&value, &tmp);
+    }
+
+    Experimental::Impl::in_place_shfl(result, value, 0, width);
+  }
+
+  __device__ static inline void scalar_intra_block_reduction(
+      FunctorType const& functor, Scalar value, bool const skip,
+      Scalar* my_global_team_buffer_element, int const shared_elements,
+      Scalar* shared_team_buffer_element) {
+    unsigned int constexpr warp_size =
+        Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+    int const warp_id = (threadIdx.y * blockDim.x) / warp_size;
+    Scalar* const my_shared_team_buffer_element =
+        shared_team_buffer_element + warp_id % shared_elements;
+
+    // Warp Level Reduction, ignoring Kokkos vector entries
+    scalar_intra_warp_reduction(functor, value, skip, warp_size, value);
+
+    if (warp_id < shared_elements) {
+      *my_shared_team_buffer_element = value;
+    }
+    // Wait for every warp to be done before using one warp to do the final
+    // cross warp reduction
+    __syncthreads();
+
+    int const num_warps = blockDim.x * blockDim.y / warp_size;
+    for (int w = shared_elements; w < num_warps; w += shared_elements) {
+      if (warp_id >= w && warp_id < w + shared_elements) {
+        if ((threadIdx.y * blockDim.x + threadIdx.x) % warp_size == 0)
+          functor.join(my_shared_team_buffer_element, &value);
+      }
+      __syncthreads();
+    }
+
+    if (warp_id == 0) {
+      functor.init(&value);
+      for (unsigned int i = threadIdx.y * blockDim.x + threadIdx.x;
+           i < blockDim.y * blockDim.x / warp_size; i += warp_size) {
+        functor.join(&value, &shared_team_buffer_element[i]);
+      }
+      scalar_intra_warp_reduction(functor, value, false, warp_size,
+                                  *my_global_team_buffer_element);
+    }
+  }
+
+  __device__ static inline bool scalar_inter_block_reduction(
+      FunctorType const& functor,
+      ::Kokkos::Experimental::HIP::size_type const block_count,
+      ::Kokkos::Experimental::HIP::size_type* const shared_data,
+      ::Kokkos::Experimental::HIP::size_type* const global_data,
+      ::Kokkos::Experimental::HIP::size_type* const global_flags) {
+    Scalar* const global_team_buffer_element =
+        reinterpret_cast<Scalar*>(global_data);
+    Scalar* const my_global_team_buffer_element =
+        global_team_buffer_element + blockIdx.x;
+    Scalar* shared_team_buffer_elements =
+        reinterpret_cast<Scalar*>(shared_data);
+    Scalar value = shared_team_buffer_elements[threadIdx.y];
+    unsigned int constexpr warp_size =
+        Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+    int shared_elements = blockDim.x * blockDim.y / warp_size;
+    int global_elements = block_count;
+    __syncthreads();
+
+    scalar_intra_block_reduction(functor, value, true,
+                                 my_global_team_buffer_element, shared_elements,
+                                 shared_team_buffer_elements);
+    __syncthreads();
+
+    // Use the last block that is done to do the reduction across the blocks
+    __shared__ unsigned int num_teams_done;
+    if (threadIdx.x + threadIdx.y == 0) {
+      num_teams_done = Kokkos::atomic_fetch_add(global_flags, 1) + 1;
+    }
+    bool is_last_block = false;
+    // FIXME_HIP HIP does not support __syncthreads_or, which is why
+    // num_teams_done has to be __shared__; otherwise this could be:
+    // if (__syncthreads_or(num_teams_done == gridDim.x)) {
+    __syncthreads();
+    if (num_teams_done == gridDim.x) {
+      is_last_block = true;
+      *global_flags = 0;
+      functor.init(&value);
+      for (int i = threadIdx.y * blockDim.x + threadIdx.x; i < global_elements;
+           i += blockDim.x * blockDim.y) {
+        functor.join(&value, &global_team_buffer_element[i]);
+      }
+      scalar_intra_block_reduction(
+          functor, value, false, shared_team_buffer_elements + blockDim.y - 1,
+          shared_elements, shared_team_buffer_elements);
+    }
+
+    return is_last_block;
+  }
+};
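+
+// The "last block finishes up" idiom used above, reduced to its minimal form
+// (an illustrative sketch; 'global_flags' counts blocks whose partial result
+// has been written to global scratch):
+//
+//   __shared__ unsigned int num_teams_done;
+//   if (threadIdx.x + threadIdx.y == 0)
+//     num_teams_done = Kokkos::atomic_fetch_add(global_flags, 1) + 1;
+//   __syncthreads();
+//   if (num_teams_done == gridDim.x) {
+//     // only the last block to arrive combines all partial results
+//   }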
+
+template <typename FunctorType>
+struct HIPReductionsFunctor<FunctorType, false> {
+  using pointer_type = typename FunctorType::pointer_type;
+  using Scalar       = typename FunctorType::value_type;
+
+  __device__ static inline void scalar_intra_warp_reduction(
+      FunctorType const& functor,
+      Scalar* value,           // Contribution
+      bool const skip_vector,  // Skip threads if Kokkos vector lanes are not
+                               // part of the reduction
+      int const width)         // How much of the warp participates
+  {
+    int const lane_id = (threadIdx.y * blockDim.x + threadIdx.x) %
+                        ::Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+    for (int delta = skip_vector ? blockDim.x : 1; delta < width; delta *= 2) {
+      if (lane_id + delta < ::Kokkos::Experimental::Impl::HIPTraits::WarpSize) {
+        functor.join(value, value + delta);
+      }
+    }
+    *value = *(value - lane_id);
+  }
+
+  __device__ static inline void scalar_intra_block_reduction(
+      FunctorType const& functor, Scalar value, bool const skip, Scalar* result,
+      int const /*shared_elements*/, Scalar* shared_team_buffer_element) {
+    int const warp_id = (threadIdx.y * blockDim.x) /
+                        ::Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+    Scalar* const my_shared_team_buffer_element =
+        shared_team_buffer_element + threadIdx.y * blockDim.x + threadIdx.x;
+    *my_shared_team_buffer_element = value;
+    // Warp Level Reduction, ignoring Kokkos vector entries
+    scalar_intra_warp_reduction(
+        functor, my_shared_team_buffer_element, skip,
+        ::Kokkos::Experimental::Impl::HIPTraits::WarpSize);
+    // Wait for every warp to be done before using one warp to do final cross
+    // warp reduction
+    __syncthreads();
+
+    if (warp_id == 0) {
+      const unsigned int delta =
+          (threadIdx.y * blockDim.x + threadIdx.x) *
+          ::Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+      if (delta < blockDim.x * blockDim.y)
+        *my_shared_team_buffer_element = shared_team_buffer_element[delta];
+      scalar_intra_warp_reduction(
+          functor, my_shared_team_buffer_element, false,
+          blockDim.x * blockDim.y /
+              ::Kokkos::Experimental::Impl::HIPTraits::WarpSize);
+      if (threadIdx.x + threadIdx.y == 0) *result = *shared_team_buffer_element;
+    }
+  }
+
+  __device__ static inline bool scalar_inter_block_reduction(
+      FunctorType const& functor,
+      ::Kokkos::Experimental::HIP::size_type const block_count,
+      ::Kokkos::Experimental::HIP::size_type* const shared_data,
+      ::Kokkos::Experimental::HIP::size_type* const global_data,
+      ::Kokkos::Experimental::HIP::size_type* const global_flags) {
+    Scalar* const global_team_buffer_element =
+        reinterpret_cast<Scalar*>(global_data);
+    Scalar* const my_global_team_buffer_element =
+        global_team_buffer_element + blockIdx.x;
+    Scalar* shared_team_buffer_elements =
+        reinterpret_cast<Scalar*>(shared_data);
+    Scalar value        = shared_team_buffer_elements[threadIdx.y];
+    int shared_elements = (blockDim.x * blockDim.y) /
+                          ::Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+    int global_elements = block_count;
+    __syncthreads();
+
+    // Do the scalar reduction inside each block
+    scalar_intra_block_reduction(functor, value, true,
+                                 my_global_team_buffer_element, shared_elements,
+                                 shared_team_buffer_elements);
+    __syncthreads();
+
+    // Use the last block that is done to do the reduction across the blocks
+    __shared__ unsigned int num_teams_done;
+    if (threadIdx.x + threadIdx.y == 0) {
+      num_teams_done = Kokkos::atomic_fetch_add(global_flags, 1) + 1;
+    }
+    bool is_last_block = false;
+    // FIXME_HIP HIP does not support __syncthreads_or, which is why
+    // num_teams_done has to be __shared__; otherwise this could be:
+    // if (__syncthreads_or(num_teams_done == gridDim.x)) {
+    __syncthreads();
+    if (num_teams_done == gridDim.x) {
+      is_last_block = true;
+      *global_flags = 0;
+      functor.init(&value);
+      for (int i = threadIdx.y * blockDim.x + threadIdx.x; i < global_elements;
+           i += blockDim.x * blockDim.y) {
+        functor.join(&value, &global_team_buffer_element[i]);
+      }
+      scalar_intra_block_reduction(
+          functor, value, false, shared_team_buffer_elements + (blockDim.y - 1),
+          shared_elements, shared_team_buffer_elements);
+    }
+
+    return is_last_block;
+  }
+};
+
+//----------------------------------------------------------------------------
+// Fused reduction and scan implementation
+//----------------------------------------------------------------------------
+/*
+ *  Algorithmic constraints:
+ *   (a) blockDim.y <= 1024
+ *   (b) blockDim.x == blockDim.z == 1
+ */
+
+template <bool DoScan, class FunctorType>
+__device__ void hip_intra_block_reduce_scan(
+    FunctorType const& functor,
+    typename FunctorType::pointer_type const base_data) {
+  using pointer_type = typename FunctorType::pointer_type;
+
+  const unsigned value_count = functor.length();
+  const unsigned not_less_power_of_two =
+      (1 << (Impl::int_log2(blockDim.y - 1) + 1));
+  const unsigned BlockSizeMask = not_less_power_of_two - 1;
+  // There is at most one warp that is neither completely full nor empty.
+  // For that warp, we shift all indices logically to the end and ignore join
+  // operations with unassigned indices in the warp when performing the intra
+  // warp reduction/scan.
+  const bool is_full_warp =
+      (((threadIdx.y >> Experimental::Impl::HIPTraits::WarpIndexShift) + 1)
+       << Experimental::Impl::HIPTraits::WarpIndexShift) <= blockDim.y;
+
+  auto block_reduce_step = [&functor, value_count](
+                               int const R, pointer_type const TD, int const S,
+                               pointer_type memory_start, int index_shift) {
+    const auto join_ptr = TD - (value_count << S) + value_count * index_shift;
+    if (R > ((1 << S) - 1) && join_ptr >= memory_start) {
+      functor.join(TD, join_ptr);
+    }
+  };
+
+  // Intra-warp reduction:
+  {
+    const unsigned mapped_idx =
+        threadIdx.y + (is_full_warp
+                           ? 0
+                           : (not_less_power_of_two - blockDim.y) &
+                                 (Experimental::Impl::HIPTraits::WarpSize - 1));
+    const pointer_type tdata_intra = base_data + value_count * threadIdx.y;
+    const pointer_type warp_start =
+        base_data +
+        value_count *
+            ((threadIdx.y >> Experimental::Impl::HIPTraits::WarpIndexShift)
+             << Experimental::Impl::HIPTraits::WarpIndexShift);
+    block_reduce_step(mapped_idx, tdata_intra, 0, warp_start, 0);
+    block_reduce_step(mapped_idx, tdata_intra, 1, warp_start, 0);
+    block_reduce_step(mapped_idx, tdata_intra, 2, warp_start, 0);
+    block_reduce_step(mapped_idx, tdata_intra, 3, warp_start, 0);
+    block_reduce_step(mapped_idx, tdata_intra, 4, warp_start, 0);
+    block_reduce_step(mapped_idx, tdata_intra, 5, warp_start, 0);
+  }
+
+  __syncthreads();  // Wait for all warps to reduce
+
+  // Inter-warp reduce-scan by a single warp to avoid extra synchronizations
+  {
+    // There is at most one warp where the memory address to be used is not
+    // (HIPTraits::WarpSize - 1) away from the warp start address. For the
+    // following reduction, we shift all indices logically to the end of the
+    // next power of two of the number of warps.
+    const unsigned n_active_warps =
+        ((blockDim.y - 1) >> Experimental::Impl::HIPTraits::WarpIndexShift) + 1;
+    if (threadIdx.y < n_active_warps) {
+      const bool is_full_warp_inter =
+          threadIdx.y <
+          (blockDim.y >> Experimental::Impl::HIPTraits::WarpIndexShift);
+      pointer_type const tdata_inter =
+          base_data +
+          value_count *
+              (is_full_warp_inter
+                   ? (threadIdx.y
+                      << Experimental::Impl::HIPTraits::WarpIndexShift) +
+                         (Experimental::Impl::HIPTraits::WarpSize - 1)
+                   : blockDim.y - 1);
+      const unsigned index_shift =
+          is_full_warp_inter
+              ? 0
+              : blockDim.y - (threadIdx.y
+                              << Experimental::Impl::HIPTraits::WarpIndexShift);
+      const int rtid_inter =
+          (threadIdx.y << Experimental::Impl::HIPTraits::WarpIndexShift) +
+          (Experimental::Impl::HIPTraits::WarpSize - 1) - index_shift;
+
+      if ((1 << 6) < BlockSizeMask) {
+        block_reduce_step(rtid_inter, tdata_inter, 6, base_data, index_shift);
+      }
+      if ((1 << 7) < BlockSizeMask) {
+        block_reduce_step(rtid_inter, tdata_inter, 7, base_data, index_shift);
+      }
+      if ((1 << 8) < BlockSizeMask) {
+        block_reduce_step(rtid_inter, tdata_inter, 8, base_data, index_shift);
+      }
+      if ((1 << 9) < BlockSizeMask) {
+        block_reduce_step(rtid_inter, tdata_inter, 9, base_data, index_shift);
+      }
+      if ((1 << 10) < BlockSizeMask) {
+        block_reduce_step(rtid_inter, tdata_inter, 10, base_data, index_shift);
+      }
+    }
+  }
+
+  __syncthreads();  // Wait for inter-warp reduce-scan to complete
+
+  if (DoScan) {
+    // Update all the values for the respective warps (except for the last one)
+    // by adding from the last value of the previous warp.
+    const unsigned int WarpMask = Experimental::Impl::HIPTraits::WarpSize - 1;
+    const int is_last_thread_in_warp =
+        is_full_warp ? ((threadIdx.y & WarpMask) ==
+                        Experimental::Impl::HIPTraits::WarpSize - 1)
+                     : (threadIdx.y == blockDim.y - 1);
+    if (threadIdx.y >= Experimental::Impl::HIPTraits::WarpSize &&
+        !is_last_thread_in_warp) {
+      const int offset_to_previous_warp_total = (threadIdx.y & (~WarpMask)) - 1;
+      functor.join(base_data + value_count * threadIdx.y,
+                   base_data + value_count * offset_to_previous_warp_total);
+    }
+  }
+}
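+
+// Illustrative trace of the intra-warp steps above (assuming warp-synchronous
+// execution, value_count == 1, and a full warp): step S joins
+// base_data[y - (1 << S)] into base_data[y] for every thread with
+// y >= (1 << S), i.e. a Hillis-Steele style inclusive scan:
+//
+//   S = 0:  value[y] += value[y - 1]
+//   S = 1:  value[y] += value[y - 2]
+//   S = 2:  value[y] += value[y - 4]   ... up to S = 5,
+//
+// after which each lane holds the inclusive prefix over its warp and the
+// last lane holds the warp total.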
+
+//----------------------------------------------------------------------------
+/**\brief  Input value-per-thread starting at 'shared_data'.
+ *         Reduction value at last thread's location.
+ *
+ *  If 'DoScan' then write blocks' scan values and block-groups' scan values.
+ *
+ *  Global reduce result is in the last thread's 'shared_data' location.
+ */
+
+template <bool DoScan, class FunctorType>
+__device__ bool hip_single_inter_block_reduce_scan_impl(
+    FunctorType const& functor,
+    ::Kokkos::Experimental::HIP::size_type const block_id,
+    ::Kokkos::Experimental::HIP::size_type const block_count,
+    ::Kokkos::Experimental::HIP::size_type* const shared_data,
+    ::Kokkos::Experimental::HIP::size_type* const global_data,
+    ::Kokkos::Experimental::HIP::size_type* const global_flags) {
+  using size_type = ::Kokkos::Experimental::HIP::size_type;
+
+  using value_type   = typename FunctorType::value_type;
+  using pointer_type = typename FunctorType::pointer_type;
+
+  // '__ffs' = position of the least significant bit set to 1.
+  // 'blockDim.y' is guaranteed to be a power of two so this
+  // is the integral shift value that can replace an integral divide.
+  unsigned int const BlockSizeShift = __ffs(blockDim.y) - 1;
+  unsigned int const BlockSizeMask  = blockDim.y - 1;
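+  // Example: for blockDim.y == 256, __ffs(256) == 9, so BlockSizeShift == 8
+  // and 'i >> BlockSizeShift' is the same as 'i / 256'.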
+
+  // Must have power of two thread count
+  if (BlockSizeMask & blockDim.y) {
+    Kokkos::abort(
+        "HIP::hip_single_inter_block_reduce_scan requires power-of-two "
+        "blockDim");
+  }
+
+  const integral_nonzero_constant<
+      size_type, std::is_pointer<typename FunctorType::reference_type>::value
+                     ? 0
+                     : sizeof(value_type) / sizeof(size_type)>
+      word_count((sizeof(value_type) * functor.length()) / sizeof(size_type));
+
+  // Reduce the accumulation for the entire block.
+  hip_intra_block_reduce_scan<false>(functor, pointer_type(shared_data));
+
+  {
+    // Write accumulation total to global scratch space.
+    // Accumulation total is the last thread's data.
+    size_type* const shared = shared_data + word_count.value * BlockSizeMask;
+    size_type* const global = global_data + word_count.value * block_id;
+
+    for (size_t i = threadIdx.y; i < word_count.value; i += blockDim.y) {
+      global[i] = shared[i];
+    }
+  }
+
+  // Contributing blocks note that their contribution has been completed via an
+  // atomic-increment flag. If this block is not the last block to contribute
+  // to this group, then the block is done.
+  // FIXME_HIP __syncthreads_or is not supported by HIP yet.
+  // const bool is_last_block = !__syncthreads_or(
+  //    threadIdx.y
+  //        ? 0
+  //        : (1 + atomicInc(global_flags, block_count - 1) < block_count));
+  __shared__ int n_done;
+  n_done = 0;
+  __syncthreads();
+  if (threadIdx.y == 0) {
+    n_done = 1 + atomicInc(global_flags, block_count - 1);
+  }
+  __syncthreads();
+  bool const is_last_block = (n_done == static_cast<int>(block_count));
+
+  if (is_last_block) {
+    size_type const b = (static_cast<long long int>(block_count) *
+                         static_cast<long long int>(threadIdx.y)) >>
+                        BlockSizeShift;
+    size_type const e = (static_cast<long long int>(block_count) *
+                         static_cast<long long int>(threadIdx.y + 1)) >>
+                        BlockSizeShift;
+
+    {
+      pointer_type const shared_data_thread = reinterpret_cast<pointer_type>(
+          shared_data + word_count.value * threadIdx.y);
+      /* reference_type shared_value = */ functor.init(shared_data_thread);
+
+      for (size_type i = b; i < e; ++i) {
+        functor.join(
+            shared_data_thread,
+            reinterpret_cast<pointer_type>(global_data + word_count.value * i));
+      }
+    }
+
+    hip_intra_block_reduce_scan<DoScan>(functor, pointer_type(shared_data));
+
+    if (DoScan) {
+      pointer_type const shared_value = reinterpret_cast<pointer_type>(
+          shared_data +
+          word_count.value * (threadIdx.y ? threadIdx.y - 1 : blockDim.y));
+
+      if (!threadIdx.y) {
+        functor.init(shared_value);
+      }
+
+      // Join previous inclusive scan value to each member
+      for (size_type i = b; i < e; ++i) {
+        pointer_type const global_value =
+            reinterpret_cast<pointer_type>(global_data + word_count.value * i);
+        functor.join(shared_value, global_value);
+        functor.copy(global_value, shared_value);
+      }
+    }
+  }
+
+  return is_last_block;
+}
+
+template <bool DoScan, typename FunctorType>
+__device__ bool hip_single_inter_block_reduce_scan(
+    FunctorType const& functor,
+    ::Kokkos::Experimental::HIP::size_type const block_id,
+    ::Kokkos::Experimental::HIP::size_type const block_count,
+    ::Kokkos::Experimental::HIP::size_type* const shared_data,
+    ::Kokkos::Experimental::HIP::size_type* const global_data,
+    ::Kokkos::Experimental::HIP::size_type* const global_flags) {
+  // If we are doing a reduction and we don't do an array reduction, we use the
+  // reduction-only path. Otherwise, we use the common path between reduction
+  // and scan.
+  if (!DoScan && !std::is_pointer<typename FunctorType::reference_type>::value)
+    // FIXME_HIP_PERFORMANCE I don't know where 16 comes from. This inequality
+    // determines if we use shared memory (false) or shuffle (true)
+    return Kokkos::Impl::HIPReductionsFunctor<
+        FunctorType, (sizeof(typename FunctorType::value_type) >
+                      16)>::scalar_inter_block_reduction(functor, block_count,
+                                                         shared_data,
+                                                         global_data,
+                                                         global_flags);
+  else {
+    return hip_single_inter_block_reduce_scan_impl<DoScan>(
+        functor, block_id, block_count, shared_data, global_data, global_flags);
+  }
+}
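+
+// For example, as written here a scalar 'double' reduction has
+// sizeof(value_type) == 8, so the predicate (8 > 16) is false and the
+// shared-memory variant HIPReductionsFunctor<FunctorType, false> is selected;
+// value types larger than 16 bytes take the shuffle-based variant instead.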
+
+// Size in bytes required for inter block reduce or scan
+template <bool DoScan, class FunctorType, class ArgTag>
+inline std::enable_if_t<DoScan, unsigned>
+hip_single_inter_block_reduce_scan_shmem(const FunctorType& functor,
+                                         const unsigned BlockSize) {
+  using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+                                         RangePolicy<Experimental::HIP, ArgTag>,
+                                         FunctorType>;
+
+  return (BlockSize + 2) * Analysis::value_size(functor);
+}
+
+template <bool DoScan, class FunctorType, class ArgTag>
+inline std::enable_if_t<!DoScan, unsigned>
+hip_single_inter_block_reduce_scan_shmem(const FunctorType& functor,
+                                         const unsigned BlockSize) {
+  using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                                         RangePolicy<Experimental::HIP, ArgTag>,
+                                         FunctorType>;
+
+  return (BlockSize + 2) * Analysis::value_size(functor);
+}
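+
+// Worked example: a reduction functor with value_size == 8 bytes launched
+// with BlockSize == 256 requests (256 + 2) * 8 == 2064 bytes of shared
+// memory for the inter-block reduce.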
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Shuffle_Reduce.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Shuffle_Reduce.hpp
new file mode 100644 (file)
index 0000000..eb85ed4
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_SHUFFLE_REDUCE_HPP
+#define KOKKOS_HIP_SHUFFLE_REDUCE_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#if defined(__HIPCC__)
+
+#include <HIP/Kokkos_HIP_Vectorization.hpp>
+
+#include <climits>
+
+namespace Kokkos {
+namespace Impl {
+
+/* Algorithmic constraints:
+ *   (a) threads with the same threadIdx.x have same value
+ *   (b) blockDim.x == power of two
+ *   (x) blockDim.z == 1
+ */
+template <typename ValueType, typename ReducerType>
+__device__ inline void hip_intra_warp_shuffle_reduction(
+    ValueType& result, ReducerType const& reducer,
+    uint32_t const max_active_thread = blockDim.y) {
+  unsigned int shift = 1;
+
+  // Reduce over values from threads with different threadIdx.y
+  unsigned int constexpr warp_size =
+      Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+  while (blockDim.x * shift < warp_size) {
+    ValueType const tmp =
+        Kokkos::Experimental::shfl_down(result, blockDim.x * shift, warp_size);
+    // Only join if the upper thread is active (this allows non-power-of-two
+    // values for blockDim.y)
+    if (threadIdx.y + shift < max_active_thread) {
+      reducer.join(&result, &tmp);
+    }
+    shift *= 2;
+  }
+
+  // Broadcast the result to all the threads in the warp
+  result = Kokkos::Experimental::shfl(result, 0, warp_size);
+}
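+
+// A minimal device-side usage sketch (hypothetical names; assumes a reducer
+// type exposing join(ValueType*, ValueType const*), e.g. a sum reducer):
+//
+//   double val = ...;            // equal across threads sharing threadIdx.x
+//   SumReducer<double> reducer;  // hypothetical reducer type
+//   hip_intra_warp_shuffle_reduction(val, reducer);
+//   // every thread of the warp now holds the warp-wide result in val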
+
+template <typename ValueType, typename ReducerType>
+__device__ inline void hip_inter_warp_shuffle_reduction(
+    ValueType& value, const ReducerType& reducer,
+    const int max_active_thread = blockDim.y) {
+  unsigned int constexpr warp_size =
+      Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+  int constexpr step_width = 8;
+  // Depending on the ValueType, __shared__ memory must be aligned to 8-byte
+  // boundaries. The reason not to use ValueType directly is that, for types
+  // with constructors, doing so could lead to race conditions.
+  __shared__ double sh_result[(sizeof(ValueType) + 7) / 8 * step_width];
+  ValueType* result = reinterpret_cast<ValueType*>(&sh_result);
+  int const step    = warp_size / blockDim.x;
+  int shift         = step_width;
+  // Skip the code below if threadIdx.y % step != 0
+  int const id = threadIdx.y % step == 0 ? threadIdx.y / step : INT_MAX;
+  if (id < step_width) {
+    result[id] = value;
+  }
+  __syncthreads();
+  while (shift <= max_active_thread / step) {
+    if (shift <= id && shift + step_width > id && threadIdx.x == 0) {
+      reducer.join(&result[id % step_width], &value);
+    }
+    __syncthreads();
+    shift += step_width;
+  }
+
+  value = result[0];
+  for (int i = 1; (i * step < max_active_thread) && (i < step_width); ++i)
+    reducer.join(&value, &result[i]);
+}
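+
+// The shared-memory sizing above, worked through (illustrative): for a
+// 16-byte ValueType, sh_result holds (16 + 7) / 8 * 8 == 16 doubles, i.e.
+// 128 bytes, enough for step_width == 8 values rounded up to 8-byte
+// boundaries.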
+
+template <typename ValueType, typename ReducerType>
+__device__ inline void hip_intra_block_shuffle_reduction(
+    ValueType& value, ReducerType const& reducer,
+    int const max_active_thread = blockDim.y) {
+  hip_intra_warp_shuffle_reduction(value, reducer, max_active_thread);
+  hip_inter_warp_shuffle_reduction(value, reducer, max_active_thread);
+}
+
+template <class FunctorType>
+__device__ inline bool hip_inter_block_shuffle_reduction(
+    typename FunctorType::reference_type value,
+    typename FunctorType::reference_type neutral, FunctorType const& reducer,
+    Kokkos::Experimental::HIP::size_type* const m_scratch_space,
+    typename FunctorType::pointer_type const /*result*/,
+    Kokkos::Experimental::HIP::size_type* const m_scratch_flags,
+    int const max_active_thread = blockDim.y) {
+  using pointer_type = typename FunctorType::pointer_type;
+  using value_type   = typename FunctorType::value_type;
+
+  // Do the intra-block reduction with shfl operations for the intra-warp
+  // reduction and static shared memory for the inter-warp reduction
+  hip_intra_block_shuffle_reduction(value, reducer, max_active_thread);
+
+  int const id = threadIdx.y * blockDim.x + threadIdx.x;
+
+  // One thread in the block writes the block result to global scratch memory
+  if (id == 0) {
+    pointer_type global =
+        reinterpret_cast<pointer_type>(m_scratch_space) + blockIdx.x;
+    *global = value;
+  }
+
+  // One warp of the last block performs the inter-block reduction by loading
+  // the block values from global scratch memory
+  bool last_block = false;
+  __syncthreads();
+  int constexpr warp_size = Kokkos::Experimental::Impl::HIPTraits::WarpSize;
+  if (id < warp_size) {
+    Kokkos::Experimental::HIP::size_type count;
+
+    // Figure out whether this is the last block
+    if (id == 0) count = Kokkos::atomic_fetch_add(m_scratch_flags, 1);
+    count = Kokkos::Experimental::shfl(count, 0, warp_size);
+
+    // Last block does the inter block reduction
+    if (count == gridDim.x - 1) {
+      // set flag back to zero
+      if (id == 0) *m_scratch_flags = 0;
+      last_block = true;
+      value      = neutral;
+
+      pointer_type const global =
+          reinterpret_cast<pointer_type>(m_scratch_space);
+
+      // Reduce all global values, splitting the work over the threads of one
+      // warp
+      const int step_size = blockDim.x * blockDim.y < warp_size
+                                ? blockDim.x * blockDim.y
+                                : warp_size;
+      for (int i = id; i < static_cast<int>(gridDim.x); i += step_size) {
+        value_type tmp = global[i];
+        reducer.join(&value, &tmp);
+      }
+
+      // Perform shfl reductions within the warp; only join if the
+      // contribution is valid (allows gridDim.x non-power-of-two and
+      // < warp_size)
+      for (unsigned int i = 1; i < warp_size; i *= 2) {
+        if ((blockDim.x * blockDim.y) > i) {
+          value_type tmp = Kokkos::Experimental::shfl_down(value, i, warp_size);
+          if (id + i < gridDim.x) reducer.join(&value, &tmp);
+        }
+      }
+    }
+  }
+  // In the last block, thread 0 holds the global reduction value in "value"
+  return last_block;
+}
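+
+// The m_scratch_flags protocol above, in brief (descriptive sketch): every
+// block publishes its partial result to m_scratch_space and atomically
+// increments the flag; the block that observes count == gridDim.x - 1 knows
+// all partials have been written, resets the flag for the next launch, and
+// reduces the gridDim.x partials within a single warp.
+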
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Space.cpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Space.cpp
new file mode 100644 (file)
index 0000000..aee9756
--- /dev/null
@@ -0,0 +1,634 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Core.hpp>
+#include <Kokkos_HIP.hpp>
+#include <Kokkos_HIP_Space.hpp>
+
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_MemorySpace.hpp>
+#include <impl/Kokkos_DeviceManagement.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+#include <stdlib.h>
+#include <iostream>
+#include <sstream>
+#include <algorithm>
+#include <atomic>
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+namespace {
+
+static std::atomic<bool> is_first_hip_managed_allocation(true);
+
+bool hip_driver_check_page_migration(int deviceId) {
+  // Check with the driver whether page-migrating memory is available.
+  // This driver query is copied from the HIP documentation.
+  int hasManagedMemory = 0;  // false by default
+  KOKKOS_IMPL_HIP_SAFE_CALL(hipDeviceGetAttribute(
+      &hasManagedMemory, hipDeviceAttributeManagedMemory, deviceId));
+  return static_cast<bool>(hasManagedMemory);
+}
+}  // namespace
+namespace Kokkos {
+namespace Impl {
+
+namespace {
+hipStream_t get_deep_copy_stream() {
+  static hipStream_t s = nullptr;
+  if (s == nullptr) {
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamCreate(&s));
+  }
+  return s;
+}
+}  // namespace
+
+void DeepCopyHIP(void* dst, void const* src, size_t n) {
+  KOKKOS_IMPL_HIP_SAFE_CALL(hipMemcpyAsync(dst, src, n, hipMemcpyDefault));
+}
+
+void DeepCopyAsyncHIP(const Kokkos::Experimental::HIP& instance, void* dst,
+                      void const* src, size_t n) {
+  KOKKOS_IMPL_HIP_SAFE_CALL(
+      hipMemcpyAsync(dst, src, n, hipMemcpyDefault, instance.hip_stream()));
+}
+
+void DeepCopyAsyncHIP(void* dst, void const* src, size_t n) {
+  hipStream_t s = get_deep_copy_stream();
+  KOKKOS_IMPL_HIP_SAFE_CALL(hipMemcpyAsync(dst, src, n, hipMemcpyDefault, s));
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<
+      Kokkos::Experimental::HIP>(
+      "Kokkos::Impl::DeepCopyAsyncHIP: Post Deep Copy Fence on Deep-Copy "
+      "stream",
+      Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+          DeepCopyResourceSynchronization,
+      [&]() { KOKKOS_IMPL_HIP_SAFE_CALL(hipStreamSynchronize(s)); });
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+namespace Kokkos {
+
+KOKKOS_DEPRECATED void Experimental::HIPSpace::access_error() {
+  const std::string msg(
+      "Kokkos::Experimental::HIPSpace::access_error attempt to execute "
+      "Experimental::HIP function from non-HIP space");
+  Kokkos::Impl::throw_runtime_exception(msg);
+}
+
+KOKKOS_DEPRECATED void Experimental::HIPSpace::access_error(const void* const) {
+  const std::string msg(
+      "Kokkos::Experimental::HIPSpace::access_error attempt to execute "
+      "Experimental::HIP function from non-HIP space");
+  Kokkos::Impl::throw_runtime_exception(msg);
+}
+
+}  // namespace Kokkos
+#endif
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Experimental {
+
+HIPSpace::HIPSpace() : m_device(HIP().hip_device()) {}
+
+HIPHostPinnedSpace::HIPHostPinnedSpace() {}
+
+HIPManagedSpace::HIPManagedSpace() : m_device(HIP().hip_device()) {}
+
+void* HIPSpace::allocate(const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+void* HIPSpace::allocate(
+    const char* arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size) const {
+  return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+void* HIPSpace::impl_allocate(
+    const char* arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  void* ptr = nullptr;
+
+  auto const error_code = hipMalloc(&ptr, arg_alloc_size);
+  if (error_code != hipSuccess) {
+    // This is the only way to clear the last error, which we should do
+    // since we are turning it into an exception here
+    (void)hipGetLastError();
+    throw HIPRawMemoryAllocationFailure(
+        arg_alloc_size, error_code,
+        RawMemoryAllocationFailure::AllocationMechanism::HIPMalloc);
+  }
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+  }
+
+  return ptr;
+}
+
+void* HIPHostPinnedSpace::allocate(const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+void* HIPHostPinnedSpace::allocate(const char* arg_label,
+                                   const size_t arg_alloc_size,
+                                   const size_t arg_logical_size) const {
+  return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+void* HIPHostPinnedSpace::impl_allocate(
+    const char* arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  void* ptr = nullptr;
+
+  auto const error_code =
+      hipHostMalloc(&ptr, arg_alloc_size, hipHostMallocNonCoherent);
+  if (error_code != hipSuccess) {
+    // This is the only way to clear the last error, which we should do
+    // since we are turning it into an exception here
+    (void)hipGetLastError();
+    throw HIPRawMemoryAllocationFailure(
+        arg_alloc_size, error_code,
+        RawMemoryAllocationFailure::AllocationMechanism::HIPHostMalloc);
+  }
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+  }
+
+  return ptr;
+}
+
+void* HIPManagedSpace::allocate(const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+void* HIPManagedSpace::allocate(const char* arg_label,
+                                const size_t arg_alloc_size,
+                                const size_t arg_logical_size) const {
+  return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+void* HIPManagedSpace::impl_allocate(
+    const char* arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  void* ptr = nullptr;
+
+  if (arg_alloc_size > 0) {
+    if (is_first_hip_managed_allocation.exchange(false) &&
+        Kokkos::show_warnings()) {
+      if (!hip_driver_check_page_migration(m_device)) {
+        std::cerr << R"warning(
+Kokkos::HIP::allocation WARNING: The combination of device and system configuration
+                                 does not support page migration between device and host.
+                                 HIPManagedSpace might not work as expected.
+                                 Please refer to the ROCm documentation on unified/managed memory.)warning"
+                  << std::endl;
+      }
+
+      // check for correct runtime environment
+      const char* hsa_xnack = std::getenv("HSA_XNACK");
+      if (!hsa_xnack)
+        std::cerr << R"warning(
+Kokkos::HIP::runtime WARNING: Kokkos did not find an environment variable 'HSA_XNACK'
+                              for the current process.
+                              Nevertheless, xnack is enabled for all processes if
+                              amdgpu.noretry=0 was set in the Linux kernel boot line.
+                              Without xnack enabled, Kokkos::HIPManaged might not behave
+                              as expected.)warning"
+                  << std::endl;
+      else if (Kokkos::Impl::strcmp(hsa_xnack, "1") != 0)
+        std::cerr << "Kokkos::HIP::runtime WARNING: Kokkos detected the "
+                     "environement variable "
+                  << "'HSA_XNACK=" << hsa_xnack << "\n"
+                  << "Kokkos advises to set it to '1' to enable it per process."
+                  << std::endl;
+    }
+    auto const error_code = hipMallocManaged(&ptr, arg_alloc_size);
+    if (error_code != hipSuccess) {
+      // This is the only way to clear the last error, which we should do
+      // since we are turning it into an exception here
+      (void)hipGetLastError();
+      throw HIPRawMemoryAllocationFailure(
+          arg_alloc_size, error_code,
+          RawMemoryAllocationFailure::AllocationMechanism::HIPMallocManaged);
+    }
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipMemAdvise(
+        ptr, arg_alloc_size, hipMemAdviseSetCoarseGrain, m_device));
+  }
+
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+  }
+
+  return ptr;
+}
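+
+// To satisfy the HSA_XNACK check above, one would typically export the
+// variable before launching the application, e.g. (shell, illustrative):
+//
+//   HSA_XNACK=1 ./application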
+
+void HIPSpace::deallocate(void* const arg_alloc_ptr,
+                          const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+void HIPSpace::deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                          const size_t arg_alloc_size,
+                          const size_t arg_logical_size) const {
+  impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void HIPSpace::impl_deallocate(
+    const char* arg_label, void* const arg_alloc_ptr,
+    const size_t arg_alloc_size, const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+                                      reported_size);
+  }
+  KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(arg_alloc_ptr));
+}
+
+void HIPHostPinnedSpace::deallocate(void* const arg_alloc_ptr,
+                                    const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void HIPHostPinnedSpace::deallocate(const char* arg_label,
+                                    void* const arg_alloc_ptr,
+                                    const size_t arg_alloc_size,
+                                    const size_t arg_logical_size) const {
+  impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void HIPHostPinnedSpace::impl_deallocate(
+    const char* arg_label, void* const arg_alloc_ptr,
+    const size_t arg_alloc_size, const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+                                      reported_size);
+  }
+  KOKKOS_IMPL_HIP_SAFE_CALL(hipHostFree(arg_alloc_ptr));
+}
+
+void HIPManagedSpace::deallocate(void* const arg_alloc_ptr,
+                                 const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void HIPManagedSpace::deallocate(const char* arg_label,
+                                 void* const arg_alloc_ptr,
+                                 const size_t arg_alloc_size,
+                                 const size_t arg_logical_size) const {
+  impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void HIPManagedSpace::impl_deallocate(
+    const char* arg_label, void* const arg_alloc_ptr,
+    const size_t arg_alloc_size, const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+                                      reported_size);
+  }
+  // We have to unset the CoarseGrain property manually as hipFree does not take
+  // care of it. Otherwise, the allocation would continue to linger in the
+  // kernel mem page table.
+  KOKKOS_IMPL_HIP_SAFE_CALL(hipMemAdvise(
+      arg_alloc_ptr, arg_alloc_size, hipMemAdviseUnsetCoarseGrain, m_device));
+  KOKKOS_IMPL_HIP_SAFE_CALL(hipFree(arg_alloc_ptr));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+#ifdef KOKKOS_ENABLE_DEBUG
+SharedAllocationRecord<void, void>
+    SharedAllocationRecord<Kokkos::Experimental::HIPSpace, void>::s_root_record;
+
+SharedAllocationRecord<void, void> SharedAllocationRecord<
+    Kokkos::Experimental::HIPHostPinnedSpace, void>::s_root_record;
+
+SharedAllocationRecord<void, void> SharedAllocationRecord<
+    Kokkos::Experimental::HIPManagedSpace, void>::s_root_record;
+#endif
+
+SharedAllocationRecord<Kokkos::Experimental::HIPSpace,
+                       void>::~SharedAllocationRecord() {
+  auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     alloc_size, (alloc_size - sizeof(SharedAllocationHeader)));
+}
+
+SharedAllocationRecord<Kokkos::Experimental::HIPHostPinnedSpace,
+                       void>::~SharedAllocationRecord() {
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     SharedAllocationRecord<void, void>::m_alloc_size);
+}
+
+SharedAllocationRecord<Kokkos::Experimental::HIPManagedSpace,
+                       void>::~SharedAllocationRecord() {
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     SharedAllocationRecord<void, void>::m_alloc_size);
+}
+
+SharedAllocationRecord<Kokkos::Experimental::HIPSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::HIPSpace& arg_space,
+        const std::string& arg_label, const size_t arg_alloc_size,
+        const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::HIPSpace,
+                                  void>::s_root_record,
+#endif
+          Kokkos::Impl::checked_allocation_with_header(arg_space, arg_label,
+                                                       arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+
+  SharedAllocationHeader header;
+
+  this->base_t::_fill_host_accessible_header_info(header, arg_label);
+
+  // Copy to device memory
+  Kokkos::Experimental::HIP exec;
+  Kokkos::Impl::DeepCopy<Kokkos::Experimental::HIPSpace, HostSpace>(
+      exec, RecordBase::m_alloc_ptr, &header, sizeof(SharedAllocationHeader));
+  exec.fence(
+      "SharedAllocationRecord<Kokkos::Experimental::HIPSpace, "
+      "void>::SharedAllocationRecord(): fence after copying header from "
+      "HostSpace");
+}
+
+SharedAllocationRecord<Kokkos::Experimental::HIPSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::HIP& arg_exec_space,
+        const Kokkos::Experimental::HIPSpace& arg_space,
+        const std::string& arg_label, const size_t arg_alloc_size,
+        const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::HIPSpace,
+                                  void>::s_root_record,
+#endif
+          Kokkos::Impl::checked_allocation_with_header(arg_space, arg_label,
+                                                       arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+
+  SharedAllocationHeader header;
+
+  this->base_t::_fill_host_accessible_header_info(header, arg_label);
+
+  // Copy to device memory
+  Kokkos::Impl::DeepCopy<Kokkos::Experimental::HIPSpace, HostSpace>(
+      arg_exec_space, RecordBase::m_alloc_ptr, &header,
+      sizeof(SharedAllocationHeader));
+}
+
+SharedAllocationRecord<Kokkos::Experimental::HIPHostPinnedSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::HIPHostPinnedSpace& arg_space,
+        const std::string& arg_label, const size_t arg_alloc_size,
+        const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::HIPHostPinnedSpace,
+                                  void>::s_root_record,
+#endif
+          Kokkos::Impl::checked_allocation_with_header(arg_space, arg_label,
+                                                       arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+  // Fill in the Header information, directly accessible via host pinned memory
+  this->base_t::_fill_host_accessible_header_info(*RecordBase::m_alloc_ptr,
+                                                  arg_label);
+}
+
+SharedAllocationRecord<Kokkos::Experimental::HIPManagedSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::HIPManagedSpace& arg_space,
+        const std::string& arg_label, const size_t arg_alloc_size,
+        const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::HIPManagedSpace,
+                                  void>::s_root_record,
+#endif
+          Kokkos::Impl::checked_allocation_with_header(arg_space, arg_label,
+                                                       arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+  // Fill in the Header information, directly accessible via managed memory
+  this->base_t::_fill_host_accessible_header_info(*RecordBase::m_alloc_ptr,
+                                                  arg_label);
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+namespace Kokkos {
+namespace Experimental {
+
+int HIP::concurrency() {
+  auto const& prop = hip_device_prop();
+  return prop.maxThreadsPerMultiProcessor * prop.multiProcessorCount;
+}
+int HIP::impl_is_initialized() {
+  return Impl::HIPInternal::singleton().is_initialized();
+}
+
+void HIP::impl_initialize(InitializationSettings const& settings) {
+  Impl::HIPInternal::singleton().initialize(::Kokkos::Impl::get_gpu(settings));
+}
+
+void HIP::impl_finalize() { Impl::HIPInternal::singleton().finalize(); }
+
+HIP::HIP()
+    : m_space_instance(&Impl::HIPInternal::singleton(),
+                       [](Impl::HIPInternal*) {}) {
+  Impl::HIPInternal::singleton().verify_is_initialized(
+      "HIP instance constructor");
+}
+
+HIP::HIP(hipStream_t const stream, bool manage_stream)
+    : m_space_instance(new Impl::HIPInternal, [](Impl::HIPInternal* ptr) {
+        ptr->finalize();
+        delete ptr;
+      }) {
+  Impl::HIPInternal::singleton().verify_is_initialized(
+      "HIP instance constructor");
+  m_space_instance->initialize(Impl::HIPInternal::singleton().m_hipDev, stream,
+                               manage_stream);
+}
+
+void HIP::print_configuration(std::ostream& os, bool /*verbose*/) const {
+  os << "Device Execution Space:\n";
+  os << "  KOKKOS_ENABLE_HIP: yes\n";
+
+  os << "HIP Options:\n";
+  os << "  KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE: ";
+#ifdef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE
+  os << "yes\n";
+#else
+  os << "no\n";
+#endif
+
+  os << "\nRuntime Configuration:\n";
+
+  m_space_instance->print_configuration(os);
+}
+
+uint32_t HIP::impl_instance_id() const noexcept {
+  return m_space_instance->impl_get_instance_id();
+}
+void HIP::impl_static_fence(const std::string& name) {
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<
+      Kokkos::Experimental::HIP>(
+      name,
+      Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+          GlobalDeviceSynchronization,
+      [&]() { KOKKOS_IMPL_HIP_SAFE_CALL(hipDeviceSynchronize()); });
+}
+
+void HIP::fence(const std::string& name) const {
+  m_space_instance->fence(name);
+}
+
+hipStream_t HIP::hip_stream() const { return m_space_instance->m_stream; }
+
+int HIP::hip_device() const { return impl_internal_space_instance()->m_hipDev; }
+
+hipDeviceProp_t const& HIP::hip_device_prop() {
+  return Impl::HIPInternal::singleton().m_deviceProp;
+}
+
+const char* HIP::name() { return "HIP"; }
+
+}  // namespace Experimental
+
+namespace Impl {
+
+int g_hip_space_factory_initialized =
+    initialize_space_factory<::Kokkos::Experimental::HIP>("150_HIP");
+
+}  // namespace Impl
+
+#ifdef KOKKOS_ENABLE_CXX14
+namespace Tools {
+namespace Experimental {
+constexpr DeviceType DeviceTypeTraits<Kokkos::Experimental::HIP>::id;
+}
+}  // namespace Tools
+#endif
+
+}  // namespace Kokkos
+
+//==============================================================================
+// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
+
+#include <impl/Kokkos_SharedAlloc_timpl.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// To avoid additional compilation cost for something that's (mostly?) not
+// performance-sensitive, we explicitly instantiate these CRTP base classes here,
+// where we have access to the associated *_timpl.hpp header files.
+template class HostInaccessibleSharedAllocationRecordCommon<
+    Kokkos::Experimental::HIPSpace>;
+template class SharedAllocationRecordCommon<Kokkos::Experimental::HIPSpace>;
+template class SharedAllocationRecordCommon<
+    Kokkos::Experimental::HIPHostPinnedSpace>;
+template class SharedAllocationRecordCommon<
+    Kokkos::Experimental::HIPManagedSpace>;
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
+//==============================================================================
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Team.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Team.hpp
new file mode 100644 (file)
index 0000000..9ddfa5f
--- /dev/null
@@ -0,0 +1,929 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_TEAM_HPP
+#define KOKKOS_HIP_TEAM_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#if defined(__HIPCC__)
+
+#include <utility>
+#include <Kokkos_Parallel.hpp>
+
+#include <HIP/Kokkos_HIP_KernelLaunch.hpp>
+#include <HIP/Kokkos_HIP_ReduceScan.hpp>
+#include <HIP/Kokkos_HIP_Shuffle_Reduce.hpp>
+#include <HIP/Kokkos_HIP_BlockSize_Deduction.hpp>
+#include <Kokkos_Vectorization.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename Type>
+struct HIPJoinFunctor {
+  using value_type = Type;
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& update, const value_type& input) {
+    update += input;
+  }
+};
+
+/**\brief  Team member_type passed to TeamPolicy or TeamTask closures.
+ *
+ *  HIP thread blocks for team closures are dimensioned as:
+ *    blockDim.x == number of "vector lanes" per "thread"
+ *    blockDim.y == number of "threads" per team
+ *    blockDim.z == number of teams in a block
+ *  where
+ *    A set of teams exactly fills a warp OR a team is the whole block
+ *      ( 0 == WarpSize % ( blockDim.x * blockDim.y ) )
+ *      OR
+ *      ( 1 == blockDim.z )
+ *
+ *  Thus when 1 < blockDim.z the team is warp-synchronous
+ *  and __syncthreads should not be called in team collectives.
+ *
+ *  When multiple teams are mapped onto a single block then the
+ *  total available shared memory must be partitioned among teams.
+ */
+class HIPTeamMember {
+ public:
+  using execution_space      = Kokkos::Experimental::HIP;
+  using scratch_memory_space = execution_space::scratch_memory_space;
+
+ private:
+  mutable void* m_team_reduce;
+  scratch_memory_space m_team_shared;
+  int m_team_reduce_size;
+  int m_league_rank;
+  int m_league_size;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& team_shmem() const {
+    return m_team_shared.set_team_thread_mode(0, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& team_scratch(
+      const int& level) const {
+    return m_team_shared.set_team_thread_mode(level, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& thread_scratch(
+      const int& level) const {
+    return m_team_shared.set_team_thread_mode(level, team_size(), team_rank());
+  }
+
+  KOKKOS_INLINE_FUNCTION int league_rank() const { return m_league_rank; }
+  KOKKOS_INLINE_FUNCTION int league_size() const { return m_league_size; }
+  KOKKOS_INLINE_FUNCTION int team_rank() const {
+#ifdef __HIP_DEVICE_COMPILE__
+    return threadIdx.y;
+#else
+    return 0;
+#endif
+  }
+
+  KOKKOS_INLINE_FUNCTION int team_size() const {
+#ifdef __HIP_DEVICE_COMPILE__
+    return blockDim.y;
+#else
+    return 0;
+#endif
+  }
+
+  KOKKOS_INLINE_FUNCTION void team_barrier() const {
+#ifdef __HIP_DEVICE_COMPILE__
+    if (1 == blockDim.z)
+      __syncthreads();  // team == block
+    else
+      __threadfence_block();  // team <= warp
+#endif
+  }
+
+  //--------------------------------------------------------------------------
+
+  template <class ValueType>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(ValueType& val,
+                                             const int& thread_id) const {
+#ifdef __HIP_DEVICE_COMPILE__
+    if (blockDim.z == 1) {  // team == block
+      __syncthreads();
+      // Wait until all threads arrive here before writing the shared data
+      if (threadIdx.x == 0u &&
+          threadIdx.y == static_cast<uint32_t>(thread_id)) {
+        *(reinterpret_cast<ValueType*>(m_team_reduce)) = val;
+      }
+      __syncthreads();  // Wait until the root thread has written before reading
+      val = *(reinterpret_cast<ValueType*>(m_team_reduce));
+    } else {               // team <= warp
+      ValueType tmp(val);  // input might not be a register variable
+      ::Kokkos::Experimental::Impl::in_place_shfl(
+          val, tmp, blockDim.x * thread_id, blockDim.x * blockDim.y);
+    }
+#else
+    (void)val;
+    (void)thread_id;
+#endif
+  }
+
+  template <class Closure, class ValueType>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(Closure const& f, ValueType& val,
+                                             const int& thread_id) const {
+    f(val);
+    team_broadcast(val, thread_id);
+  }
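+
+  // Usage sketch (illustrative): broadcast thread 0's value to the team.
+  //
+  //   int v = (member.team_rank() == 0) ? computed_value : 0;
+  //   member.team_broadcast(v, 0);  // all team members now see thread 0's v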
+
+  //--------------------------------------------------------------------------
+  /**\brief  Reduction across a team
+   *
+   *  Mapping of teams onto blocks:
+   *    blockDim.x  is "vector lanes"
+   *    blockDim.y  is team "threads"
+   *    blockDim.z  is number of teams per block
+   *
+   *  Requires:
+   *    blockDim.x is a power of two
+   *    blockDim.x <= HIPTraits::WarpSize
+   *    ( 0 == HIPTraits::WarpSize % ( blockDim.x * blockDim.y ) )
+   *      OR
+   *    ( 1 == blockDim.z )
+   */
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+  team_reduce(ReducerType const& reducer) const noexcept {
+    team_reduce(reducer, reducer.reference());
+  }
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+  team_reduce(ReducerType const& reducer,
+              typename ReducerType::value_type& value) const noexcept {
+#ifdef __HIP_DEVICE_COMPILE__
+    typename Kokkos::Impl::FunctorAnalysis<
+        FunctorPatternInterface::REDUCE, TeamPolicy<Experimental::HIP>,
+        ReducerType>::Reducer wrapped_reducer(&reducer);
+    hip_intra_block_shuffle_reduction(value, wrapped_reducer, blockDim.y);
+    reducer.reference() = value;
+#else
+    (void)reducer;
+    (void)value;
+#endif
+  }
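+
+  // Usage sketch (illustrative): team-wide sum of one contribution per
+  // thread.
+  //
+  //   double contrib = ...;
+  //   member.team_reduce(Kokkos::Sum<double>(contrib));
+  //   // contrib now holds the team-wide sum on every thread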
+
+  //--------------------------------------------------------------------------
+  /** \brief  Intra-team exclusive prefix sum with team_rank() ordering
+   *          with intra-team non-deterministic ordering accumulation.
+   *
+   *  The global inter-team accumulation value will, at the end of the
+   *  league's parallel execution, be the scan's total.
+   *  Parallel execution ordering of the league's teams is non-deterministic.
+   *  As such the base value for each team's scan operation is similarly
+   *  non-deterministic.
+   */
+  template <typename Type>
+  KOKKOS_INLINE_FUNCTION Type team_scan(const Type& value,
+                                        Type* const global_accum) const {
+#ifdef __HIP_DEVICE_COMPILE__
+    Type* const base_data = reinterpret_cast<Type*>(m_team_reduce);
+
+    __syncthreads();  // Don't write into shared data until all threads have
+                      // entered this function
+
+    if (0 == threadIdx.y) {
+      base_data[0] = 0;
+    }
+
+    base_data[threadIdx.y + 1] = value;
+
+    Impl::HIPJoinFunctor<Type> hip_join_functor;
+    typename Kokkos::Impl::FunctorAnalysis<
+        FunctorPatternInterface::REDUCE, TeamPolicy<Experimental::HIP>,
+        Impl::HIPJoinFunctor<Type>>::Reducer reducer(&hip_join_functor);
+    Impl::hip_intra_block_reduce_scan<true>(reducer, base_data + 1);
+
+    if (global_accum) {
+      if (blockDim.y == threadIdx.y + 1) {
+        base_data[blockDim.y] =
+            atomic_fetch_add(global_accum, base_data[blockDim.y]);
+      }
+      __syncthreads();  // Wait for atomic
+      base_data[threadIdx.y] += base_data[blockDim.y];
+    }
+
+    return base_data[threadIdx.y];
+#else
+    (void)value;
+    (void)global_accum;
+    return Type();
+#endif
+  }
+
+  /** \brief  Intra-team exclusive prefix sum with team_rank() ordering.
+   *
+   *  The highest rank thread can compute the reduction total as
+   *    reduction_total = dev.team_scan( value ) + value ;
+   */
+  template <typename Type>
+  KOKKOS_INLINE_FUNCTION Type team_scan(const Type& value) const {
+    return this->template team_scan<Type>(value, nullptr);
+  }
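+
+  // Usage sketch (illustrative): exclusive prefix sum of one count per
+  // thread.
+  //
+  //   int offset = member.team_scan(my_count);
+  //   // rank r's offset == sum of my_count over ranks 0..r-1; the highest
+  //   // rank recovers the team total as offset + my_count.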
+
+  //----------------------------------------
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION static std::enable_if_t<is_reducer<ReducerType>::value>
+  vector_reduce(ReducerType const& reducer) {
+    vector_reduce(reducer, reducer.reference());
+  }
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION static std::enable_if_t<is_reducer<ReducerType>::value>
+  vector_reduce(ReducerType const& reducer,
+                typename ReducerType::value_type& value) {
+#ifdef __HIP_DEVICE_COMPILE__
+    if (blockDim.x == 1) return;
+
+    // Intra vector lane shuffle reduction:
+    typename ReducerType::value_type tmp(value);
+    typename ReducerType::value_type tmp2 = tmp;
+
+    for (int i = blockDim.x; (i >>= 1);) {
+      ::Kokkos::Experimental::Impl::in_place_shfl_down(tmp2, tmp, i,
+                                                       blockDim.x);
+      if (static_cast<int>(threadIdx.x) < i) {
+        reducer.join(tmp, tmp2);
+      }
+    }
+
+    // Broadcast from root lane to all other lanes.
+    // Cannot use "butterfly" algorithm to avoid the broadcast
+    // because floating point summation is not associative
+    // and thus different threads could have different results.
+
+    ::Kokkos::Experimental::Impl::in_place_shfl(tmp2, tmp, 0, blockDim.x);
+    value               = tmp2;
+    reducer.reference() = tmp2;
+#else
+    (void)reducer;
+    (void)value;
+#endif
+  }
+
+  //----------------------------------------
+  // Private for the driver
+
+  KOKKOS_INLINE_FUNCTION
+  HIPTeamMember(void* shared, const size_t shared_begin,
+                const size_t shared_size, void* scratch_level_1_ptr,
+                const size_t scratch_level_1_size, const int arg_league_rank,
+                const int arg_league_size)
+      : m_team_reduce(shared),
+        m_team_shared(((char*)shared) + shared_begin, shared_size,
+                      scratch_level_1_ptr, scratch_level_1_size),
+        m_team_reduce_size(shared_begin),
+        m_league_rank(arg_league_rank),
+        m_league_size(arg_league_size) {}
+
+ public:
+  // Declare to avoid unused private member warnings, which are triggered
+  // when SFINAE excludes the member function that uses these variables.
+  // Making another class a friend also suppresses these warnings.
+  bool impl_avoid_sfinae_warning() const noexcept {
+    return m_team_reduce_size > 0 && m_team_reduce != nullptr;
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename iType>
+struct TeamThreadRangeBoundariesStruct<iType, HIPTeamMember> {
+  using index_type = iType;
+  const HIPTeamMember& member;
+  const iType start;
+  const iType end;
+
+  KOKKOS_INLINE_FUNCTION
+  TeamThreadRangeBoundariesStruct(const HIPTeamMember& thread_, iType count)
+      : member(thread_), start(0), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  TeamThreadRangeBoundariesStruct(const HIPTeamMember& thread_, iType begin_,
+                                  iType end_)
+      : member(thread_), start(begin_), end(end_) {}
+};
+
+template <typename iType>
+struct TeamVectorRangeBoundariesStruct<iType, HIPTeamMember> {
+  using index_type = iType;
+  const HIPTeamMember& member;
+  const iType start;
+  const iType end;
+
+  KOKKOS_INLINE_FUNCTION
+  TeamVectorRangeBoundariesStruct(const HIPTeamMember& thread_,
+                                  const iType& count)
+      : member(thread_), start(0), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  TeamVectorRangeBoundariesStruct(const HIPTeamMember& thread_,
+                                  const iType& begin_, const iType& end_)
+      : member(thread_), start(begin_), end(end_) {}
+};
+
+template <typename iType>
+struct ThreadVectorRangeBoundariesStruct<iType, HIPTeamMember> {
+  using index_type = iType;
+  const index_type start;
+  const index_type end;
+
+  KOKKOS_INLINE_FUNCTION
+  ThreadVectorRangeBoundariesStruct(const HIPTeamMember, index_type count)
+      : start(static_cast<index_type>(0)), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  ThreadVectorRangeBoundariesStruct(index_type count)
+      : start(static_cast<index_type>(0)), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  ThreadVectorRangeBoundariesStruct(const HIPTeamMember, index_type arg_begin,
+                                    index_type arg_end)
+      : start(arg_begin), end(arg_end) {}
+
+  KOKKOS_INLINE_FUNCTION
+  ThreadVectorRangeBoundariesStruct(index_type arg_begin, index_type arg_end)
+      : start(arg_begin), end(arg_end) {}
+};
+
+}  // namespace Impl
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HIPTeamMember>
+    TeamThreadRange(const Impl::HIPTeamMember& thread, iType count) {
+  return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HIPTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::HIPTeamMember>
+TeamThreadRange(const Impl::HIPTeamMember& thread, iType1 begin, iType2 end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HIPTeamMember>(
+      thread, iType(begin), iType(end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::TeamVectorRangeBoundariesStruct<iType, Impl::HIPTeamMember>
+    TeamVectorRange(const Impl::HIPTeamMember& thread, const iType& count) {
+  return Impl::TeamVectorRangeBoundariesStruct<iType, Impl::HIPTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamVectorRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::HIPTeamMember>
+TeamVectorRange(const Impl::HIPTeamMember& thread, const iType1& begin,
+                const iType2& end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::TeamVectorRangeBoundariesStruct<iType, Impl::HIPTeamMember>(
+      thread, iType(begin), iType(end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HIPTeamMember>
+    ThreadVectorRange(const Impl::HIPTeamMember& thread, iType count) {
+  return Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HIPTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::HIPTeamMember>
+ThreadVectorRange(const Impl::HIPTeamMember& thread, iType1 arg_begin,
+                  iType2 arg_end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HIPTeamMember>(
+      thread, iType(arg_begin), iType(arg_end));
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::ThreadSingleStruct<Impl::HIPTeamMember> PerTeam(
+    const Impl::HIPTeamMember& thread) {
+  return Impl::ThreadSingleStruct<Impl::HIPTeamMember>(thread);
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::VectorSingleStruct<Impl::HIPTeamMember> PerThread(
+    const Impl::HIPTeamMember& thread) {
+  return Impl::VectorSingleStruct<Impl::HIPTeamMember>(thread);
+}
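+
+// Typical nesting of the range constructs above (a sketch, assuming a team
+// member `member` and a rank-2 view `a` inside a TeamPolicy kernel):
+//
+//   Kokkos::parallel_for(Kokkos::TeamThreadRange(member, nrows), [&](int i) {
+//     Kokkos::parallel_for(Kokkos::ThreadVectorRange(member, ncols),
+//                          [&](int j) { a(i, j) = 0.0; });
+//   });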
+
+//----------------------------------------------------------------------------
+
+/** \brief  Inter-thread parallel_for.
+ *
+ *  Executes closure(iType i) for each i=[0..N).
+ *
+ * The range [0..N) is mapped to all threads of the calling thread team.
+ */
+template <typename iType, class Closure>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HIPTeamMember>&
+        loop_boundaries,
+    const Closure& closure) {
+#ifdef __HIP_DEVICE_COMPILE__
+  for (iType i = loop_boundaries.start + threadIdx.y; i < loop_boundaries.end;
+       i += blockDim.y)
+    closure(i);
+#else
+  (void)loop_boundaries;
+  (void)closure;
+#endif
+}
+
+//----------------------------------------------------------------------------
+
+/** \brief  Inter-thread parallel_reduce with a reducer.
+ *
+ *  Executes closure(iType i, ValueType & val) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all threads of the
+ *  calling thread team and a reduction of val via the
+ *  given reducer is performed and put into the reducer's result.
+ */
+template <typename iType, class Closure, class ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+                    iType, Impl::HIPTeamMember>& loop_boundaries,
+                const Closure& closure, const ReducerType& reducer) {
+#ifdef __HIP_DEVICE_COMPILE__
+  typename ReducerType::value_type value;
+  reducer.init(value);
+
+  for (iType i = loop_boundaries.start + threadIdx.y; i < loop_boundaries.end;
+       i += blockDim.y) {
+    closure(i, value);
+  }
+
+  loop_boundaries.member.team_reduce(reducer, value);
+#else
+  (void)loop_boundaries;
+  (void)closure;
+  (void)reducer;
+#endif
+}
+
+/** \brief  Inter-thread parallel_reduce assuming summation.
+ *
+ *  Executes closure(iType i, ValueType & val) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all threads of the
+ *  calling thread team and a summation of val is
+ *  performed and put into result.
+ */
+template <typename iType, class Closure, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+                    iType, Impl::HIPTeamMember>& loop_boundaries,
+                const Closure& closure, ValueType& result) {
+#ifdef __HIP_DEVICE_COMPILE__
+  ValueType val;
+  Kokkos::Sum<ValueType> reducer(val);
+
+  reducer.init(reducer.reference());
+
+  for (iType i = loop_boundaries.start + threadIdx.y; i < loop_boundaries.end;
+       i += blockDim.y) {
+    closure(i, val);
+  }
+
+  loop_boundaries.member.team_reduce(reducer, val);
+  result = reducer.reference();
+#else
+  (void)loop_boundaries;
+  (void)closure;
+  (void)result;
+#endif
+}
+
+/** \brief  Inter-thread parallel exclusive prefix sum.
+ *
+ *  Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to each rank in the team (whose global rank is
+ *  less than N) and a scan operation is performed. The last call to closure has
+ *  final == true.
+ */
+// This is the same code as in CUDA and largely the same as in OpenMPTarget
+template <typename iType, typename FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HIPTeamMember>&
+        loop_bounds,
+    const FunctorType& lambda) {
+  // Extract value_type from lambda
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Kokkos::Impl::FunctorPatternInterface::SCAN, void,
+      FunctorType>::value_type;
+
+  const auto start     = loop_bounds.start;
+  const auto end       = loop_bounds.end;
+  auto& member         = loop_bounds.member;
+  const auto team_size = member.team_size();
+  const auto team_rank = member.team_rank();
+  const auto nchunk    = (end - start + team_size - 1) / team_size;
+  value_type accum     = 0;
+  // each team has to process one or more chunks of the prefix scan
+  for (iType i = 0; i < nchunk; ++i) {
+    auto ii = start + i * team_size + team_rank;
+    // local accumulation for this chunk
+    value_type local_accum = 0;
+    // user updates value with prefix value
+    if (ii < loop_bounds.end) lambda(ii, local_accum, false);
+    // perform team scan
+    local_accum = member.team_scan(local_accum);
+    // add the running total to this chunk's local accumulation
+    auto val = accum + local_accum;
+    // user updates their data with total accumulation
+    if (ii < loop_bounds.end) lambda(ii, val, true);
+    // the last value needs to be propagated to the next chunk
+    if (team_rank == team_size - 1) accum = val;
+    // broadcast last value to rest of the team
+    member.team_broadcast(accum, team_size - 1);
+  }
+}
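+
+// Usage sketch for the scan above (illustrative, assuming a team member
+// `member` and a rank-1 view `x` of length n):
+//
+//   parallel_scan(TeamThreadRange(member, n),
+//                 [=](int i, float& partial, bool final) {
+//                   const float val = x(i);
+//                   if (final) x(i) = partial;  // write exclusive prefix sum
+//                   partial += val;
+//                 });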
+
+template <typename iType, class Closure>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::TeamVectorRangeBoundariesStruct<iType, Impl::HIPTeamMember>&
+        loop_boundaries,
+    const Closure& closure) {
+#ifdef __HIP_DEVICE_COMPILE__
+  for (iType i = loop_boundaries.start + threadIdx.y * blockDim.x + threadIdx.x;
+       i < loop_boundaries.end; i += blockDim.y * blockDim.x)
+    closure(i);
+#else
+  (void)loop_boundaries;
+  (void)closure;
+#endif
+}
+
+template <typename iType, class Closure, class ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
+                    iType, Impl::HIPTeamMember>& loop_boundaries,
+                const Closure& closure, const ReducerType& reducer) {
+#ifdef __HIP_DEVICE_COMPILE__
+  typename ReducerType::value_type value;
+  reducer.init(value);
+
+  for (iType i = loop_boundaries.start + threadIdx.y * blockDim.x + threadIdx.x;
+       i < loop_boundaries.end; i += blockDim.y * blockDim.x) {
+    closure(i, value);
+  }
+
+  loop_boundaries.member.vector_reduce(reducer, value);
+  loop_boundaries.member.team_reduce(reducer, value);
+#else
+  (void)loop_boundaries;
+  (void)closure;
+  (void)reducer;
+#endif
+}
+
+template <typename iType, class Closure, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>
+parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
+                    iType, Impl::HIPTeamMember>& loop_boundaries,
+                const Closure& closure, ValueType& result) {
+#ifdef __HIP_DEVICE_COMPILE__
+  ValueType val;
+  Kokkos::Sum<ValueType> reducer(val);
+
+  reducer.init(reducer.reference());
+
+  for (iType i = loop_boundaries.start + threadIdx.y * blockDim.x + threadIdx.x;
+       i < loop_boundaries.end; i += blockDim.y * blockDim.x) {
+    closure(i, val);
+  }
+
+  loop_boundaries.member.vector_reduce(reducer);
+  loop_boundaries.member.team_reduce(reducer);
+  result = reducer.reference();
+#else
+  (void)loop_boundaries;
+  (void)closure;
+  (void)result;
+#endif
+}
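+
+// Usage sketch (illustrative only; `team`, `N`, and `x` are placeholder
+// names): TeamVectorRange spreads the range over both the thread and
+// vector-lane dimensions, which is why the reduction above combines
+// vector_reduce and team_reduce.
+//
+//   double sum_sq = 0.;
+//   Kokkos::parallel_reduce(
+//       Kokkos::TeamVectorRange(team, N),
+//       [=](int i, double& partial) { partial += x(i) * x(i); }, sum_sq);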
+
+//----------------------------------------------------------------------------
+
+/** \brief  Intra-thread vector parallel_for.
+ *
+ *  Executes closure(iType i) for each i=[0..N)
+ *
+ * The range [0..N) is mapped to all vector lanes of the calling thread.
+ */
+template <typename iType, class Closure>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HIPTeamMember>&
+        loop_boundaries,
+    const Closure& closure) {
+#ifdef __HIP_DEVICE_COMPILE__
+  for (iType i = loop_boundaries.start + threadIdx.x; i < loop_boundaries.end;
+       i += blockDim.x) {
+    closure(i);
+  }
+#else
+  (void)loop_boundaries;
+  (void)closure;
+#endif
+}
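+
+// Usage sketch (illustrative only; `team`, `nrows`, `row_length`, `a`, `x`,
+// and `y` are placeholder names): the canonical nesting puts a TeamThreadRange
+// over rows and a ThreadVectorRange over the entries within each row.
+//
+//   Kokkos::parallel_for(Kokkos::TeamThreadRange(team, nrows), [=](int row) {
+//     Kokkos::parallel_for(Kokkos::ThreadVectorRange(team, row_length(row)),
+//                          [=](int j) { y(row, j) = a * x(row, j); });
+//   });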
+
+//----------------------------------------------------------------------------
+
+/** \brief  Intra-thread vector parallel_reduce.
+ *
+ *  Calls closure(iType i, ValueType & val) for each i=[0..N).
+ *
+ *  The range [0..N) is mapped to all vector lanes of
+ *  the calling thread and a reduction of val is performed using +=
+ *  and output into result.
+ *
+ *  The identity value for the += operator is assumed to be the default
+ *  constructed value.
+ */
+template <typename iType, class Closure, class ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+parallel_reduce(Impl::ThreadVectorRangeBoundariesStruct<
+                    iType, Impl::HIPTeamMember> const& loop_boundaries,
+                Closure const& closure, ReducerType const& reducer) {
+#ifdef __HIP_DEVICE_COMPILE__
+  reducer.init(reducer.reference());
+
+  for (iType i = loop_boundaries.start + threadIdx.x; i < loop_boundaries.end;
+       i += blockDim.x) {
+    closure(i, reducer.reference());
+  }
+
+  Impl::HIPTeamMember::vector_reduce(reducer);
+#else
+  (void)loop_boundaries;
+  (void)closure;
+  (void)reducer;
+#endif
+}
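+
+// Usage sketch (illustrative only; `team`, `N`, and `x` are placeholder
+// names): any Kokkos reducer can be passed, e.g. Kokkos::Max, which supplies
+// the init()/join() operations and the identity used above.
+//
+//   double lane_max;
+//   Kokkos::parallel_reduce(
+//       Kokkos::ThreadVectorRange(team, N),
+//       [=](int i, double& m) { m = (m < x(i)) ? x(i) : m; },
+//       Kokkos::Max<double>(lane_max));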
+
+/** \brief  Intra-thread vector parallel_reduce.
+ *
+ *  Calls closure(iType i, ValueType & val) for each i=[0..N).
+ *
+ *  The range [0..N) is mapped to all vector lanes of
+ *  the calling thread and a reduction of val is performed using +=
+ *  and output into result.
+ *
+ *  The identity value for the += operator is assumed to be the default
+ *  constructed value.
+ */
+template <typename iType, class Closure, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!is_reducer<ValueType>::value>
+parallel_reduce(Impl::ThreadVectorRangeBoundariesStruct<
+                    iType, Impl::HIPTeamMember> const& loop_boundaries,
+                Closure const& closure, ValueType& result) {
+#ifdef __HIP_DEVICE_COMPILE__
+  result = ValueType();
+
+  for (iType i = loop_boundaries.start + threadIdx.x; i < loop_boundaries.end;
+       i += blockDim.x) {
+    closure(i, result);
+  }
+
+  Impl::HIPTeamMember::vector_reduce(Kokkos::Sum<ValueType>(result));
+#else
+  (void)loop_boundaries;
+  (void)closure;
+  (void)result;
+#endif
+}
+
+//----------------------------------------------------------------------------
+
+/** \brief  Intra-thread vector parallel scan with reducer.
+ *
+ *  Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all vector lanes in the
+ *  thread and a scan operation is performed.
+ *  The last call to closure has final == true.
+ */
+template <typename iType, class Closure, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_scan(const Impl::ThreadVectorRangeBoundariesStruct<
+                  iType, Impl::HIPTeamMember>& loop_boundaries,
+              const Closure& closure, const ReducerType& reducer) {
+#ifdef __HIP_DEVICE_COMPILE__
+  using value_type = typename ReducerType::value_type;
+  value_type accum;
+  reducer.init(accum);
+  const value_type identity = accum;
+
+  // Loop through the boundaries in vector-length chunks,
+  // performing a scan at each iteration.
+
+  // All thread "lanes" must loop the same number of times.
+  // Determine a loop end common to all thread "lanes."
+  // Requires:
+  //   blockDim.x is a power of two and thus
+  //     ( end % blockDim.x ) == ( end & ( blockDim.x - 1 ) )
+  //   1 <= blockDim.x <= HIPTraits::WarpSize
+
+  const int mask = blockDim.x - 1;
+  const int rem  = loop_boundaries.end & mask;  // == end % blockDim.x
+  const int end  = loop_boundaries.end + (rem ? blockDim.x - rem : 0);
+
+  for (int i = threadIdx.x; i < end; i += blockDim.x) {
+    value_type val = identity;
+
+    // First acquire per-lane contributions.
+    // This sets i's val to i-1's contribution
+    // to make the latter in_place_shfl_up an
+    // exclusive scan -- the final accumulation
+    // of i's val will be included in the second
+    // closure call later.
+    if (i < loop_boundaries.end && threadIdx.x > 0) closure(i - 1, val, false);
+
+    // Bottom up exclusive scan in triangular pattern
+    // where each HIP thread is the root of a reduction tree
+    // from the zeroth "lane" to itself.
+    //  [t] += [t-1] if t >= 1
+    //  [t] += [t-2] if t >= 2
+    //  [t] += [t-4] if t >= 4
+    //  ...
+    //  This differs from the non-reducer overload, where an inclusive scan was
+    //  implemented, because in general the binary operator cannot be inverted
+    //  and we would not be able to remove the inclusive contribution by
+    //  inversion.
+    for (int j = 1; j < static_cast<int>(blockDim.x); j <<= 1) {
+      value_type tmp = identity;
+      ::Kokkos::Experimental::Impl::in_place_shfl_up(tmp, val, j, blockDim.x);
+      if (j <= static_cast<int>(threadIdx.x)) {
+        reducer.join(val, tmp);
+      }
+    }
+
+    // Include accumulation
+    reducer.join(val, accum);
+
+    // Update i's contribution into the val
+    // and add it to accum for next round
+    if (i < loop_boundaries.end) closure(i, val, true);
+    ::Kokkos::Experimental::Impl::in_place_shfl(accum, val, blockDim.x - 1,
+                                                blockDim.x);
+  }
+#else
+  (void)loop_boundaries;
+  (void)closure;
+  (void)reducer;
+#endif
+}
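+
+// Usage sketch (illustrative only; `team`, `N`, `x`, and `prefix_max` are
+// placeholder names): a prefix scan over the vector lanes with a non-sum
+// reducer; the reducer supplies the join() used by the shuffle-based scan
+// above.
+//
+//   double scan_tmp;
+//   Kokkos::parallel_scan(
+//       Kokkos::ThreadVectorRange(team, N),
+//       [=](int i, double& partial, bool final) {
+//         partial = (partial < x(i)) ? x(i) : partial;
+//         if (final) prefix_max(i) = partial;  // inclusive prefix maximum
+//       },
+//       Kokkos::Max<double>(scan_tmp));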
+
+//----------------------------------------------------------------------------
+
+/** \brief  Intra-thread vector parallel exclusive prefix sum.
+ *
+ *  Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all vector lanes in the
+ *  thread and a scan operation is performed.
+ *  The last call to closure has final == true.
+ */
+template <typename iType, class Closure>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HIPTeamMember>&
+        loop_boundaries,
+    const Closure& closure) {
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+  value_type dummy;
+  parallel_scan(loop_boundaries, closure, Kokkos::Sum<value_type>(dummy));
+}
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<Impl::HIPTeamMember>&,
+    const FunctorType& lambda) {
+#ifdef __HIP_DEVICE_COMPILE__
+  if (threadIdx.x == 0) lambda();
+#else
+  (void)lambda;
+#endif
+}
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::HIPTeamMember>&,
+    const FunctorType& lambda) {
+#ifdef __HIP_DEVICE_COMPILE__
+  if (threadIdx.x == 0 && threadIdx.y == 0) lambda();
+#else
+  (void)lambda;
+#endif
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<Impl::HIPTeamMember>&,
+    const FunctorType& lambda, ValueType& val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  if (threadIdx.x == 0) lambda(val);
+  ::Kokkos::Experimental::Impl::in_place_shfl(val, val, 0, blockDim.x);
+#else
+  (void)lambda;
+  (void)val;
+#endif
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::HIPTeamMember>& single_struct,
+    const FunctorType& lambda, ValueType& val) {
+#ifdef __HIP_DEVICE_COMPILE__
+  if (threadIdx.x == 0 && threadIdx.y == 0) {
+    lambda(val);
+  }
+  single_struct.team_member.team_broadcast(val, 0);
+#else
+  (void)single_struct;
+  (void)lambda;
+  (void)val;
+#endif
+}
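+
+// Usage sketch (illustrative only; `team` and `grab_work` are placeholder
+// names): single() runs the lambda on one vector lane of the calling thread
+// (PerThread) or on one thread of the whole team (PerTeam); the overloads
+// taking a value broadcast the result to all other members, as above.
+//
+//   int chunk = 0;
+//   Kokkos::single(
+//       Kokkos::PerTeam(team), [=](int& v) { v = grab_work(); }, chunk);
+//   // every member of the team now sees the same value of `chunk`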
+
+}  // namespace Kokkos
+
+#endif /* defined( __HIPCC__ ) */
+
+#endif /* #ifndef KOKKOS_HIP_TEAM_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_UniqueToken.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_UniqueToken.hpp
new file mode 100644 (file)
index 0000000..a0722f6
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_UNIQUE_TOKEN_HPP
+#define KOKKOS_HIP_UNIQUE_TOKEN_HPP
+
+#include <Kokkos_HIP_Space.hpp>
+#include <Kokkos_UniqueToken.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+
+namespace Kokkos {
+
+namespace Impl {
+Kokkos::View<uint32_t*, Kokkos::Experimental::HIPSpace>
+hip_global_unique_token_locks(bool deallocate = false);
+}
+
+namespace Experimental {
+
+// Both global and instance UniqueTokens are implemented in the same way:
+// the global version has one shared static lock array underneath, but it
+// can't be a static member variable since we need to access it on device
+// and we share the implementation with the instance version.
+template <>
+class UniqueToken<HIP, UniqueTokenScope::Global> {
+ protected:
+  View<uint32_t*, HIPSpace> m_locks;
+
+ public:
+  using execution_space = HIP;
+  using size_type       = int32_t;
+
+  explicit UniqueToken(execution_space const& = HIP())
+      : m_locks(Kokkos::Impl::hip_global_unique_token_locks()) {}
+
+ protected:
+  // These are constructors for the Instance version
+  UniqueToken(size_type max_size) {
+    m_locks = Kokkos::View<uint32_t*, HIPSpace>("Kokkos::UniqueToken::m_locks",
+                                                max_size);
+  }
+  UniqueToken(size_type max_size, execution_space const& exec) {
+    m_locks = Kokkos::View<uint32_t*, HIPSpace>(
+        Kokkos::view_alloc(exec, "Kokkos::UniqueToken::m_locks"), max_size);
+  }
+
+ public:
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken(const UniqueToken&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken(UniqueToken&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken& operator=(const UniqueToken&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken& operator=(UniqueToken&&) = default;
+
+  /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  size_type size() const noexcept { return m_locks.extent(0); }
+
+ private:
+  __device__ size_type impl_acquire() const {
+    int idx = blockIdx.x * (blockDim.x * blockDim.y) +
+              threadIdx.y * blockDim.x + threadIdx.x;
+    idx                            = idx % size();
+    unsigned long long active      = __ballot(1);
+    unsigned long long done_active = 0;
+    bool done                      = false;
+    while (active != done_active) {
+      if (!done) {
+        // Using m_locks(idx) fails the self-containment test of
+        // Kokkos_HIP.hpp. That failure stems from the space access
+        // verification, because the Host execution space is not defined.
+        if (Kokkos::atomic_compare_exchange(m_locks.data() + idx, 0, 1) == 0) {
+          done = true;
+        } else {
+          idx += blockDim.y * blockDim.x + 1;
+          idx = idx % size();
+        }
+      }
+      done_active = __ballot(done ? 1 : 0);
+    }
+
+// Make sure that all writes by the previous lock owner are visible to me
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+    desul::atomic_thread_fence(desul::MemoryOrderAcquire(),
+                               desul::MemoryScopeDevice());
+#else
+    Kokkos::memory_fence();
+#endif
+    return idx;
+  }
+
+ public:
+  /// \brief acquire value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  size_type acquire() const {
+    KOKKOS_IF_ON_DEVICE(return impl_acquire();)
+    KOKKOS_IF_ON_HOST(return 0;)
+  }
+
+  /// \brief release an acquired value
+  KOKKOS_INLINE_FUNCTION
+  void release(size_type idx) const noexcept {
+// Make sure my writes are visible to the next lock owner
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+    desul::atomic_thread_fence(desul::MemoryOrderRelease(),
+                               desul::MemoryScopeDevice());
+#else
+    Kokkos::memory_fence();
+#endif
+    (void)Kokkos::atomic_exchange(m_locks.data() + idx, 0);
+  }
+};
+
+template <>
+class UniqueToken<HIP, UniqueTokenScope::Instance>
+    : public UniqueToken<HIP, UniqueTokenScope::Global> {
+ public:
+  // The instance version forwards to the protected constructor, which
+  // creates a lock array per instance.
+  UniqueToken()
+      : UniqueToken<HIP, UniqueTokenScope::Global>(
+            Kokkos::Experimental::HIP().concurrency()) {}
+  explicit UniqueToken(execution_space const& arg)
+      : UniqueToken<HIP, UniqueTokenScope::Global>(
+            Kokkos::Experimental::HIP().concurrency(), arg) {}
+  explicit UniqueToken(size_type max_size)
+      : UniqueToken<HIP, UniqueTokenScope::Global>(max_size) {}
+  UniqueToken(size_type max_size, execution_space const& arg)
+      : UniqueToken<HIP, UniqueTokenScope::Global>(max_size, arg) {}
+};
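+
+// Usage sketch (illustrative only; `N`, `M`, and the body of the lambda are
+// placeholders): a token grants each concurrently executing iteration
+// exclusive access to one slot of an array sized token.size().
+//
+//   Kokkos::Experimental::UniqueToken<Kokkos::Experimental::HIP> token;
+//   Kokkos::View<double**, Kokkos::Experimental::HIPSpace> scratch(
+//       "scratch", token.size(), M);
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<Kokkos::Experimental::HIP>(0, N),
+//       KOKKOS_LAMBDA(int i) {
+//         const auto id = token.acquire();
+//         // ... use scratch(id, ...) without racing other iterations ...
+//         token.release(id);
+//       });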
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif  // KOKKOS_HIP_UNIQUE_TOKEN_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Vectorization.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_Vectorization.hpp
new file mode 100644 (file)
index 0000000..18b5f57
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_VECTORIZATION_HPP
+#define KOKKOS_HIP_VECTORIZATION_HPP
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+// Shuffle operations require input to be a register (stack) variable
+
+// Derived implements do_shfl_op( T& in, int lane, int width),
+// which turns into one of the __shfl_XXX intrinsics.
+// Since the logic with respect to value sizes, etc., is the same everywhere,
+// put it all in one place.
+template <class Derived>
+struct in_place_shfl_op {
+  // CRTP boilerplate
+  __device__ KOKKOS_IMPL_FORCEINLINE const Derived& self() const noexcept {
+    return *static_cast<Derived const*>(this);
+  }
+
+  // sizeof(Scalar) < sizeof(int) case
+  template <class Scalar>
+  // requires _assignable_from_bits<Scalar>
+  __device__ inline std::enable_if_t<sizeof(Scalar) < sizeof(int)> operator()(
+      Scalar& out, Scalar const& in, int lane_or_delta, int width) const
+      noexcept {
+    using shfl_type = int;
+    union conv_type {
+      Scalar orig;
+      shfl_type conv;
+      // This should be fine, members get explicitly reset, which changes the
+      // active member
+      KOKKOS_FUNCTION conv_type() { conv = 0; }
+    };
+    conv_type tmp_in;
+    tmp_in.orig = in;
+    shfl_type tmp_out;
+    tmp_out = reinterpret_cast<shfl_type&>(tmp_in.orig);
+    conv_type res;
+    //------------------------------------------------
+    res.conv = self().do_shfl_op(tmp_out, lane_or_delta, width);
+    //------------------------------------------------
+    out = reinterpret_cast<Scalar&>(res.conv);
+  }
+
+  // sizeof(Scalar) == sizeof(int) case
+  template <class Scalar>
+  // requires _assignable_from_bits<Scalar>
+  __device__ inline std::enable_if_t<sizeof(Scalar) == sizeof(int)> operator()(
+      Scalar& out, Scalar const& in, int lane_or_delta, int width) const
+      noexcept {
+    reinterpret_cast<int&>(out) = self().do_shfl_op(
+        reinterpret_cast<int const&>(in), lane_or_delta, width);
+  }
+
+  template <class Scalar>
+  __device__ inline std::enable_if_t<sizeof(Scalar) == sizeof(double)>
+  operator()(Scalar& out, Scalar const& in, int lane_or_delta, int width) const
+      noexcept {
+    reinterpret_cast<double&>(out) = self().do_shfl_op(
+        *reinterpret_cast<double const*>(&in), lane_or_delta, width);
+  }
+
+  // sizeof(Scalar) > sizeof(double) case
+  template <typename Scalar>
+  __device__ inline std::enable_if_t<(sizeof(Scalar) > sizeof(double))>
+  operator()(Scalar& out, const Scalar& val, int lane_or_delta, int width) const
+      noexcept {
+    using shuffle_as_t = int;
+    int constexpr N    = sizeof(Scalar) / sizeof(shuffle_as_t);
+
+    for (int i = 0; i < N; ++i) {
+      reinterpret_cast<shuffle_as_t*>(&out)[i] = self().do_shfl_op(
+          reinterpret_cast<shuffle_as_t const*>(&val)[i], lane_or_delta, width);
+    }
+    // FIXME_HIP - this fence should be removed once the hip-clang compiler
+    // properly supports fence semantics for shuffles
+    __atomic_signal_fence(__ATOMIC_SEQ_CST);
+  }
+};
+
+struct in_place_shfl_fn : in_place_shfl_op<in_place_shfl_fn> {
+  template <class T>
+  __device__ KOKKOS_IMPL_FORCEINLINE T do_shfl_op(T& val, int lane,
+                                                  int width) const noexcept {
+    auto return_val = __shfl(val, lane, width);
+    return return_val;
+  }
+};
+
+template <class... Args>
+__device__ KOKKOS_IMPL_FORCEINLINE void in_place_shfl(Args&&... args) noexcept {
+  in_place_shfl_fn{}((Args &&) args...);
+}
+
+struct in_place_shfl_up_fn : in_place_shfl_op<in_place_shfl_up_fn> {
+  template <class T>
+  __device__ KOKKOS_IMPL_FORCEINLINE T do_shfl_op(T& val, int lane,
+                                                  int width) const noexcept {
+    auto return_val = __shfl_up(val, lane, width);
+    return return_val;
+  }
+};
+
+template <class... Args>
+__device__ KOKKOS_IMPL_FORCEINLINE void in_place_shfl_up(
+    Args&&... args) noexcept {
+  in_place_shfl_up_fn{}((Args &&) args...);
+}
+
+struct in_place_shfl_down_fn : in_place_shfl_op<in_place_shfl_down_fn> {
+  template <class T>
+  __device__ KOKKOS_IMPL_FORCEINLINE T do_shfl_op(T& val, int lane,
+                                                  int width) const noexcept {
+    auto return_val = __shfl_down(val, lane, width);
+    return return_val;
+  }
+};
+
+template <class... Args>
+__device__ KOKKOS_IMPL_FORCEINLINE void in_place_shfl_down(
+    Args&&... args) noexcept {
+  in_place_shfl_down_fn{}((Args &&) args...);
+}
+
+}  // namespace Impl
+
+template <class T>
+// requires default_constructible<T> && _assignable_from_bits<T>
+__device__ inline T shfl(const T& val, const int& srcLane, const int& width) {
+  T rv = {};
+  Impl::in_place_shfl(rv, val, srcLane, width);
+  return rv;
+}
+
+template <class T>
+// requires default_constructible<T> && _assignable_from_bits<T>
+__device__ inline T shfl_down(const T& val, int delta, int width) {
+  T rv = {};
+  Impl::in_place_shfl_down(rv, val, delta, width);
+  return rv;
+}
+
+template <class T>
+// requires default_constructible<T> && _assignable_from_bits<T>
+__device__ inline T shfl_up(const T& val, int delta, int width) {
+  T rv = {};
+  Impl::in_place_shfl_up(rv, val, delta, width);
+  return rv;
+}
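+
+// Usage sketch (illustrative only, device code; `warp_sum` is a placeholder
+// helper and `width` is assumed to be a power of two with all lanes
+// participating): a classic warp reduction built from shfl_down.
+//
+//   __device__ double warp_sum(double val, int width) {
+//     for (int delta = width / 2; delta > 0; delta /= 2)
+//       val += Kokkos::Experimental::shfl_down(val, delta, width);
+//     return val;  // lane 0 ends up holding the full sum
+//   }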
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_WorkGraphPolicy.hpp b/bundled/kokkos-3.7.00/core/src/HIP/Kokkos_HIP_WorkGraphPolicy.hpp
new file mode 100644 (file)
index 0000000..081f6f4
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_WORKGRAPHPOLICY_HPP
+#define KOKKOS_HIP_WORKGRAPHPOLICY_HPP
+
+#include <Kokkos_HIP.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
+                  Kokkos::Experimental::HIP> {
+ public:
+  using Policy = Kokkos::WorkGraphPolicy<Traits...>;
+  using Self   = ParallelFor<FunctorType, Policy, Kokkos::Experimental::HIP>;
+
+ private:
+  Policy m_policy;
+  FunctorType m_functor;
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<std::is_void<TagType>::value> exec_one(
+      const std::int32_t w) const noexcept {
+    m_functor(w);
+  }
+
+  template <class TagType>
+  __device__ inline std::enable_if_t<!std::is_void<TagType>::value> exec_one(
+      const std::int32_t w) const noexcept {
+    const TagType t{};
+    m_functor(t, w);
+  }
+
+ public:
+  __device__ inline void operator()() const noexcept {
+    // Spin until COMPLETED_TOKEN.
+    // END_TOKEN indicates no work is currently available.
+    for (std::int32_t w = Policy::END_TOKEN;
+         Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
+      if (Policy::END_TOKEN != w) {
+        exec_one<typename Policy::work_tag>(w);
+        m_policy.completed_work(w);
+      }
+    }
+  }
+
+  inline void execute() {
+    const int warps_per_block = 4;
+    const dim3 grid(
+        Kokkos::Experimental::Impl::hip_internal_multiprocessor_count(), 1, 1);
+    const dim3 block(1, Kokkos::Experimental::Impl::HIPTraits::WarpSize,
+                     warps_per_block);
+    const int shared = 0;
+
+    Kokkos::Experimental::Impl::HIPParallelLaunch<Self>(
+        *this, grid, block, shared,
+        Experimental::HIP().impl_internal_space_instance(), false);
+  }
+
+  inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_policy(arg_policy), m_functor(arg_functor) {}
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif /* #ifndef KOKKOS_HIP_WORKGRAPHPOLICY_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/HPX/Kokkos_HPX.cpp b/bundled/kokkos-3.7.00/core/src/HPX/Kokkos_HPX.cpp
new file mode 100644 (file)
index 0000000..6027ead
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+
+#ifdef KOKKOS_ENABLE_HPX
+#include <Kokkos_HPX.hpp>
+
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+#include <hpx/local/condition_variable.hpp>
+#include <hpx/local/init.hpp>
+#include <hpx/local/thread.hpp>
+#include <hpx/local/mutex.hpp>
+
+#include <atomic>
+#include <chrono>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <type_traits>
+
+namespace Kokkos {
+namespace Experimental {
+
+bool HPX::m_hpx_initialized = false;
+#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
+std::atomic<uint32_t> HPX::m_next_instance_id{HPX::impl_default_instance_id() +
+                                              1};
+uint32_t HPX::m_active_parallel_region_count{0};
+hpx::spinlock HPX::m_active_parallel_region_count_mutex;
+hpx::condition_variable_any HPX::m_active_parallel_region_count_cond;
+HPX::instance_data HPX::m_default_instance_data;
+#else
+Kokkos::Impl::thread_buffer HPX::m_default_buffer;
+#endif
+
+int HPX::concurrency() {
+  hpx::runtime *rt = hpx::get_runtime_ptr();
+  if (rt == nullptr) {
+    return hpx::threads::hardware_concurrency();
+  } else {
+    if (hpx::threads::get_self_ptr() == nullptr) {
+      return hpx::resource::get_thread_pool(0).get_os_thread_count();
+    } else {
+      return hpx::this_thread::get_pool()->get_os_thread_count();
+    }
+  }
+}
+
+void HPX::impl_initialize(InitializationSettings const &settings) {
+  hpx::runtime *rt = hpx::get_runtime_ptr();
+  if (rt == nullptr) {
+    hpx::local::init_params i;
+    i.cfg = {
+#ifdef KOKKOS_ENABLE_DEBUG
+        "--hpx:attach-debugger=exception",
+#endif
+    };
+    if (settings.has_num_threads()) {
+      i.cfg.emplace_back("hpx.os_threads=" +
+                         std::to_string(settings.get_num_threads()));
+    }
+    int argc_hpx     = 1;
+    char name[]      = "kokkos_hpx";
+    char *argv_hpx[] = {name, nullptr};
+    hpx::local::start(nullptr, argc_hpx, argv_hpx, i);
+
+    m_hpx_initialized = true;
+  }
+}
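+
+// Usage sketch (illustrative only): when Kokkos starts HPX itself, a thread
+// count requested through InitializationSettings is forwarded to
+// hpx.os_threads as above.
+//
+//   Kokkos::initialize(Kokkos::InitializationSettings().set_num_threads(8));
+//   // ... run parallel work on Kokkos::Experimental::HPX ...
+//   Kokkos::finalize();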
+
+bool HPX::impl_is_initialized() noexcept {
+  hpx::runtime *rt = hpx::get_runtime_ptr();
+  return rt != nullptr;
+}
+
+void HPX::impl_finalize() {
+  if (m_hpx_initialized) {
+    hpx::runtime *rt = hpx::get_runtime_ptr();
+    if (rt != nullptr) {
+      hpx::apply([]() { hpx::local::finalize(); });
+      hpx::local::stop();
+    } else {
+      Kokkos::abort(
+          "Kokkos::Experimental::HPX::impl_finalize: Kokkos started "
+          "HPX but something else already stopped HPX\n");
+    }
+  }
+}
+
+}  // namespace Experimental
+
+namespace Impl {
+
+int g_hpx_space_factory_initialized =
+    initialize_space_factory<Kokkos::Experimental::HPX>("060_HPX");
+
+}  // namespace Impl
+
+#ifdef KOKKOS_ENABLE_CXX14
+namespace Tools {
+namespace Experimental {
+constexpr DeviceType DeviceTypeTraits<Kokkos::Experimental::HPX>::id;
+}
+}  // namespace Tools
+#endif
+
+}  // namespace Kokkos
+
+#else
+void KOKKOS_CORE_SRC_IMPL_HPX_PREVENT_LINK_ERROR() {}
+#endif  //#ifdef KOKKOS_ENABLE_HPX
diff --git a/bundled/kokkos-3.7.00/core/src/HPX/Kokkos_HPX_Task.cpp b/bundled/kokkos-3.7.00/core/src/HPX/Kokkos_HPX_Task.cpp
new file mode 100644 (file)
index 0000000..e61ac72
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_HPX) && defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core.hpp>
+
+#include <impl/Kokkos_TaskQueue_impl.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template class TaskQueue<Kokkos::Experimental::HPX,
+                         Kokkos::Experimental::HPX::memory_space>;
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#else
+void KOKKOS_CORE_SRC_IMPL_HPX_TASK_PREVENT_LINK_ERROR() {}
+#endif  // #if defined( KOKKOS_ENABLE_HPX ) && defined( KOKKOS_ENABLE_TASKDAG )
diff --git a/bundled/kokkos-3.7.00/core/src/HPX/Kokkos_HPX_Task.hpp b/bundled/kokkos-3.7.00/core/src/HPX/Kokkos_HPX_Task.hpp
new file mode 100644 (file)
index 0000000..67765a6
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HPX_TASK_HPP
+#define KOKKOS_HPX_TASK_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_HPX) && defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_TaskScheduler_fwd.hpp>
+
+#include <Kokkos_HPX.hpp>
+
+#include <hpx/local/execution.hpp>
+#include <hpx/local/future.hpp>
+
+#include <type_traits>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class QueueType>
+class TaskQueueSpecialization<
+    SimpleTaskScheduler<Kokkos::Experimental::HPX, QueueType>> {
+ public:
+  using execution_space = Kokkos::Experimental::HPX;
+  using scheduler_type =
+      SimpleTaskScheduler<Kokkos::Experimental::HPX, QueueType>;
+  using member_type =
+      TaskTeamMemberAdapter<Kokkos::Impl::HPXTeamMember, scheduler_type>;
+  using memory_space = Kokkos::HostSpace;
+
+  static void execute(scheduler_type const &scheduler) {
+    // NOTE: We create an instance so that we can use dispatch_execute_task.
+    // This is not necessarily the most efficient, but can be improved later.
+    TaskQueueSpecialization<scheduler_type> task_queue;
+    task_queue.scheduler = &scheduler;
+    Kokkos::Impl::dispatch_execute_task(&task_queue,
+                                        Kokkos::Experimental::HPX());
+    Kokkos::Experimental::HPX().fence(
+        "Kokkos::Impl::TaskQueueSpecialization<SimpleTask>::execute: fence "
+        "after task execution");
+  }
+
+  // Must provide task queue execution function
+  void execute_task() const {
+    // See [note 1] in Kokkos_HPX.hpp for an explanation. The task queue does
+    // not store an execution space instance, so we only need to reset the
+    // parallel region count here.
+    Kokkos::Experimental::HPX::reset_count_on_exit_parallel reset_count_on_exit;
+
+    using hpx::for_loop;
+    using hpx::execution::par;
+    using hpx::execution::static_chunk_size;
+    using task_base_type = typename scheduler_type::task_base_type;
+
+    const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
+
+    thread_buffer &buffer = Kokkos::Experimental::HPX().impl_get_buffer();
+    buffer.resize(num_worker_threads, 512);
+
+    auto &queue = scheduler->queue();
+
+    for_loop(par.with(static_chunk_size(1)), 0, num_worker_threads,
+             [this, &queue, &buffer, num_worker_threads](int) {
+               // NOTE: This implementation has been simplified based on the
+               // assumption that team_size = 1. The HPX backend currently only
+               // supports a team size of 1.
+               std::size_t t =
+                   Kokkos::Experimental::HPX::impl_hardware_thread_id();
+
+               buffer.get(t);
+               HPXTeamMember member(
+                   TeamPolicyInternal<Kokkos::Experimental::HPX>(
+                       Kokkos::Experimental::HPX(), num_worker_threads, 1),
+                   0, t, buffer.get(t), 512);
+
+               member_type single_exec(*scheduler, member);
+               member_type &team_exec = single_exec;
+
+               auto &team_scheduler = team_exec.scheduler();
+               auto current_task    = OptionalRef<task_base_type>(nullptr);
+
+               while (!queue.is_done()) {
+                 current_task =
+                     queue.pop_ready_task(team_scheduler.team_scheduler_info());
+
+                 if (current_task) {
+                   KOKKOS_ASSERT(current_task->is_single_runnable() ||
+                                 current_task->is_team_runnable());
+                   current_task->as_runnable_task().run(single_exec);
+                   queue.complete((*std::move(current_task)).as_runnable_task(),
+                                  team_scheduler.team_scheduler_info());
+                 }
+               }
+             });
+  }
+
+  static uint32_t get_max_team_count(execution_space const &espace) {
+    return static_cast<uint32_t>(espace.concurrency());
+  }
+
+  template <typename TaskType>
+  static void get_function_pointer(typename TaskType::function_type &ptr,
+                                   typename TaskType::destroy_type &dtor) {
+    ptr  = TaskType::apply;
+    dtor = TaskType::destroy;
+  }
+
+ private:
+  const scheduler_type *scheduler;
+};
+
+template <class Scheduler>
+class TaskQueueSpecializationConstrained<
+    Scheduler,
+    std::enable_if_t<std::is_same<typename Scheduler::execution_space,
+                                  Kokkos::Experimental::HPX>::value>> {
+ public:
+  using execution_space = Kokkos::Experimental::HPX;
+  using scheduler_type  = Scheduler;
+  using member_type =
+      TaskTeamMemberAdapter<Kokkos::Impl::HPXTeamMember, scheduler_type>;
+  using memory_space = Kokkos::HostSpace;
+
+  static void iff_single_thread_recursive_execute(
+      scheduler_type const &scheduler) {
+    using task_base_type = typename scheduler_type::task_base;
+    using queue_type     = typename scheduler_type::queue_type;
+
+    if (1 == Kokkos::Experimental::HPX::concurrency()) {
+      task_base_type *const end = (task_base_type *)task_base_type::EndTag;
+      task_base_type *task      = end;
+
+      HPXTeamMember member(TeamPolicyInternal<Kokkos::Experimental::HPX>(
+                               Kokkos::Experimental::HPX(), 1, 1),
+                           0, 0, nullptr, 0);
+      member_type single_exec(scheduler, member);
+
+      do {
+        task = end;
+
+        // Loop by priority and then type
+        for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
+          for (int j = 0; j < 2 && end == task; ++j) {
+            task =
+                queue_type::pop_ready_task(&scheduler.m_queue->m_ready[i][j]);
+          }
+        }
+
+        if (end == task) break;
+
+        (*task->m_apply)(task, &single_exec);
+
+        scheduler.m_queue->complete(task);
+
+      } while (true);
+    }
+  }
+
+  static void execute(scheduler_type const &scheduler) {
+    // NOTE: We create an instance so that we can use dispatch_execute_task.
+    // This is not necessarily the most efficient, but can be improved later.
+    TaskQueueSpecializationConstrained<scheduler_type> task_queue;
+    task_queue.scheduler = &scheduler;
+    Kokkos::Impl::dispatch_execute_task(&task_queue,
+                                        Kokkos::Experimental::HPX());
+    Kokkos::Experimental::HPX().fence(
+        "Kokkos::Impl::TaskQueueSpecialization<SimpleTask>::execute: fence "
+        "after task execution");
+  }
+
+  // Must provide task queue execution function
+  void execute_task() const {
+    // See [note 1] in Kokkos_HPX.hpp for an explanation. The task queue does
+    // not store an execution space instance, so we only need to reset the
+    // parallel region count here.
+    Kokkos::Experimental::HPX::reset_count_on_exit_parallel reset_count_on_exit;
+
+    using hpx::for_loop;
+    using hpx::execution::par;
+    using hpx::execution::static_chunk_size;
+
+    using task_base_type = typename scheduler_type::task_base;
+    using queue_type     = typename scheduler_type::queue_type;
+
+    const int num_worker_threads     = Kokkos::Experimental::HPX::concurrency();
+    static task_base_type *const end = (task_base_type *)task_base_type::EndTag;
+    constexpr task_base_type *no_more_tasks_sentinel = nullptr;
+
+    thread_buffer &buffer = Kokkos::Experimental::HPX().impl_get_buffer();
+    buffer.resize(num_worker_threads, 512);
+
+    auto &queue = scheduler->queue();
+    queue.initialize_team_queues(num_worker_threads);
+
+    auto exec = Kokkos::Experimental::HPX::impl_get_executor();
+
+    for_loop(
+        par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
+        [this, &buffer, num_worker_threads](int t) {
+          // NOTE: This implementation has been simplified based on the
+          // assumption that team_size = 1. The HPX backend currently only
+          // supports a team size of 1.
+          buffer.get(Kokkos::Experimental::HPX::impl_hardware_thread_id());
+          HPXTeamMember member(
+              TeamPolicyInternal<Kokkos::Experimental::HPX>(
+                  Kokkos::Experimental::HPX(), num_worker_threads, 1),
+              0, t, buffer.get(t), 512);
+
+          member_type single_exec(*scheduler, member);
+          member_type &team_exec = single_exec;
+
+          auto &team_queue     = team_exec.scheduler().queue();
+          task_base_type *task = no_more_tasks_sentinel;
+
+          do {
+            if (task != no_more_tasks_sentinel && task != end) {
+              team_queue.complete(task);
+            }
+
+            if (*((volatile int *)&team_queue.m_ready_count) > 0) {
+              task = end;
+              for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
+                for (int j = 0; j < 2 && end == task; ++j) {
+                  task = queue_type::pop_ready_task(&team_queue.m_ready[i][j]);
+                }
+              }
+            } else {
+              task = team_queue.attempt_to_steal_task();
+            }
+
+            if (task != no_more_tasks_sentinel && task != end) {
+              (*task->m_apply)(task, &single_exec);
+            }
+          } while (task != no_more_tasks_sentinel);
+        });
+  }
+
+  template <typename TaskType>
+  static void get_function_pointer(typename TaskType::function_type &ptr,
+                                   typename TaskType::destroy_type &dtor) {
+    ptr  = TaskType::apply;
+    dtor = TaskType::destroy;
+  }
+
+ private:
+  const scheduler_type *scheduler;
+};
+
+extern template class TaskQueue<
+    Kokkos::Experimental::HPX,
+    typename Kokkos::Experimental::HPX::memory_space>;
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_HPX_TASK_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/HPX/Kokkos_HPX_WorkGraphPolicy.hpp b/bundled/kokkos-3.7.00/core/src/HPX/Kokkos_HPX_WorkGraphPolicy.hpp
new file mode 100644 (file)
index 0000000..5f2eff5
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HPX_WORKGRAPHPOLICY_HPP
+#define KOKKOS_HPX_WORKGRAPHPOLICY_HPP
+
+#include <Kokkos_HPX.hpp>
+
+#include <hpx/local/algorithm.hpp>
+#include <hpx/local/execution.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
+                  Kokkos::Experimental::HPX> {
+ private:
+  using Policy  = Kokkos::WorkGraphPolicy<Traits...>;
+  using WorkTag = typename Policy::work_tag;
+
+  Policy m_policy;
+  FunctorType m_functor;
+
+  template <class TagType>
+  std::enable_if_t<std::is_void<TagType>::value> execute_functor(
+      const std::int32_t w) const noexcept {
+    m_functor(w);
+  }
+
+  template <class TagType>
+  std::enable_if_t<!std::is_void<TagType>::value> execute_functor(
+      const std::int32_t w) const noexcept {
+    const TagType t{};
+    m_functor(t, w);
+  }
+
+ public:
+  void execute() const {
+    dispatch_execute_task(this, m_policy.space());
+    m_policy.space().fence(
+        "Kokkos::Experimental::Impl::HPX::ParallelFor<WorkGraphPolicy>: fence "
+        "after kernel execution");
+  }
+
+  void execute_task() const {
+    // See [note 1] in Kokkos_HPX.hpp for an explanation. The work graph policy
+    // does not store an execution space instance, so we only need to reset the
+    // parallel region count here.
+    Kokkos::Experimental::HPX::reset_count_on_exit_parallel reset_count_on_exit;
+
+    const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
+
+    using hpx::for_loop;
+    using hpx::execution::par;
+    using hpx::execution::static_chunk_size;
+
+    auto exec = Kokkos::Experimental::HPX::impl_get_executor();
+
+    for_loop(par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
+             [this](int) {
+               std::int32_t w = m_policy.pop_work();
+               while (w != Policy::COMPLETED_TOKEN) {
+                 if (w != Policy::END_TOKEN) {
+                   execute_functor<WorkTag>(w);
+                   m_policy.completed_work(w);
+                 }
+
+                 w = m_policy.pop_work();
+               }
+             });
+  }
+
+  inline ParallelFor(const FunctorType &arg_functor, const Policy &arg_policy)
+      : m_policy(arg_policy), m_functor(arg_functor) {}
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif /* #ifndef KOKKOS_HPX_WORKGRAPHPOLICY_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/KokkosCore_Config_DeclareBackend.hpp b/bundled/kokkos-3.7.00/core/src/KokkosCore_Config_DeclareBackend.hpp
new file mode 100644 (file)
index 0000000..2d53ef0
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+#ifndef KOKKOS_DECLARE_HPP_
+#define KOKKOS_DECLARE_HPP_
+
+#include <decl/Kokkos_Declare_SERIAL.hpp>
+
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/KokkosCore_Config_FwdBackend.hpp b/bundled/kokkos-3.7.00/core/src/KokkosCore_Config_FwdBackend.hpp
new file mode 100644 (file)
index 0000000..5f1b60f
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+#ifndef KOKKOS_FWD_HPP_
+#define KOKKOS_FWD_HPP_
+
+#include <fwd/Kokkos_Fwd_SERIAL.hpp>
+
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/KokkosCore_Config_PostInclude.hpp b/bundled/kokkos-3.7.00/core/src/KokkosCore_Config_PostInclude.hpp
new file mode 100644 (file)
index 0000000..6be71a1
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+#ifndef KOKKOS_POST_INCLUDE_HPP_
+#define KOKKOS_POST_INCLUDE_HPP_
+
+
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/KokkosCore_Config_SetupBackend.hpp b/bundled/kokkos-3.7.00/core/src/KokkosCore_Config_SetupBackend.hpp
new file mode 100644 (file)
index 0000000..0e21f06
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+#ifndef KOKKOS_SETUP_HPP_
+#define KOKKOS_SETUP_HPP_
+
+
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/KokkosCore_config.h b/bundled/kokkos-3.7.00/core/src/KokkosCore_config.h
new file mode 100644 (file)
index 0000000..744267c
--- /dev/null
@@ -0,0 +1,111 @@
+
+#if !defined(KOKKOS_MACROS_HPP) || defined(KOKKOS_CORE_CONFIG_H)
+#error \
+    "Do not include KokkosCore_config.h directly; include Kokkos_Macros.hpp instead."
+#else
+#define KOKKOS_CORE_CONFIG_H
+#endif
+
+// KOKKOS_VERSION % 100 is the patch level
+// KOKKOS_VERSION / 100 % 100 is the minor version
+// KOKKOS_VERSION / 10000 is the major version
+#define KOKKOS_VERSION 30700
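+
+// For illustration, decomposing per the scheme above (a sketch; with
+// KOKKOS_VERSION == 30700 this gives major 3, minor 7, patch 0):
+//
+//   constexpr int kokkos_major = KOKKOS_VERSION / 10000;      // 3
+//   constexpr int kokkos_minor = KOKKOS_VERSION / 100 % 100;  // 7
+//   constexpr int kokkos_patch = KOKKOS_VERSION % 100;        // 0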
+
+/* Execution Spaces */
+#define KOKKOS_ENABLE_SERIAL
+/* #undef KOKKOS_ENABLE_OPENMP */
+/* #undef KOKKOS_ENABLE_OPENACC */
+/* #undef KOKKOS_ENABLE_OPENMPTARGET */
+/* #undef KOKKOS_ENABLE_THREADS */
+/* #undef KOKKOS_ENABLE_CUDA */
+/* #undef KOKKOS_ENABLE_HIP */
+/* #undef KOKKOS_ENABLE_HPX */
+/* #undef KOKKOS_ENABLE_MEMKIND */
+/* #undef KOKKOS_ENABLE_LIBRT */
+/* #undef KOKKOS_ENABLE_SYCL */
+
+/* General Settings */
+#define KOKKOS_ENABLE_CXX14
+/* #undef KOKKOS_ENABLE_CXX17 */
+/* #undef KOKKOS_ENABLE_CXX20 */
+
+/* #undef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE */
+/* #undef KOKKOS_ENABLE_CUDA_UVM */
+/* #undef KOKKOS_ENABLE_CUDA_LAMBDA */
+/* #undef KOKKOS_ENABLE_CUDA_CONSTEXPR */
+/* #undef KOKKOS_ENABLE_CUDA_LDG_INTRINSIC */
+/* #undef KOKKOS_ENABLE_IMPL_CUDA_MALLOC_ASYNC */
+/* #undef KOKKOS_ENABLE_HIP_RELOCATABLE_DEVICE_CODE */
+/* #undef KOKKOS_ENABLE_HIP_MULTIPLE_KERNEL_INSTANTIATIONS */
+/* #undef KOKKOS_ENABLE_HPX_ASYNC_DISPATCH */
+/* #undef KOKKOS_ENABLE_DEBUG */
+/* #undef KOKKOS_ENABLE_DEBUG_DUALVIEW_MODIFY_CHECK */
+/* #undef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK */
+/* #undef KOKKOS_ENABLE_PROFILING_LOAD_PRINT */
+/* #undef KOKKOS_ENABLE_TUNING */
+#define KOKKOS_ENABLE_DEPRECATED_CODE_3
+#define KOKKOS_ENABLE_DEPRECATION_WARNINGS
+/* #undef KOKKOS_ENABLE_LARGE_MEM_TESTS */
+#define KOKKOS_ENABLE_COMPLEX_ALIGN
+#define KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+/* #undef KOKKOS_OPT_RANGE_AGGRESSIVE_VECTORIZATION */
+/* #undef KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION */
+
+/* TPL Settings */
+/* #undef KOKKOS_ENABLE_HWLOC */
+/* #undef KOKKOS_USE_LIBRT */
+/* #undef KOKKOS_ENABLE_HBWSPACE */
+/* #undef KOKKOS_ENABLE_LIBDL */
+/* #undef KOKKOS_ENABLE_LIBQUADMATH */
+/* #undef KOKKOS_IMPL_CUDA_CLANG_WORKAROUND */
+
+/* #undef KOKKOS_COMPILER_CUDA_VERSION */
+
+/* #undef KOKKOS_ARCH_SSE42 */
+/* #undef KOKKOS_ARCH_ARMV80 */
+/* #undef KOKKOS_ARCH_ARMV8_THUNDERX */
+/* #undef KOKKOS_ARCH_ARMV81 */
+/* #undef KOKKOS_ARCH_ARMV8_THUNDERX2 */
+/* #undef KOKKOS_ARCH_AMD_AVX2 */
+/* #undef KOKKOS_ARCH_AVX */
+/* #undef KOKKOS_ARCH_AVX2 */
+/* #undef KOKKOS_ARCH_AVX512XEON */
+/* #undef KOKKOS_ARCH_KNC */
+/* #undef KOKKOS_ARCH_AVX512MIC */
+/* #undef KOKKOS_ARCH_POWER7 */
+/* #undef KOKKOS_ARCH_POWER8 */
+/* #undef KOKKOS_ARCH_POWER9 */
+/* #undef KOKKOS_ARCH_INTEL_GEN */
+/* #undef KOKKOS_ARCH_INTEL_DG1 */
+/* #undef KOKKOS_ARCH_INTEL_GEN9 */
+/* #undef KOKKOS_ARCH_INTEL_GEN11 */
+/* #undef KOKKOS_ARCH_INTEL_GEN12LP */
+/* #undef KOKKOS_ARCH_INTEL_XEHP */
+/* #undef KOKKOS_ARCH_INTEL_GPU */
+/* #undef KOKKOS_ARCH_KEPLER */
+/* #undef KOKKOS_ARCH_KEPLER30 */
+/* #undef KOKKOS_ARCH_KEPLER32 */
+/* #undef KOKKOS_ARCH_KEPLER35 */
+/* #undef KOKKOS_ARCH_KEPLER37 */
+/* #undef KOKKOS_ARCH_MAXWELL */
+/* #undef KOKKOS_ARCH_MAXWELL50 */
+/* #undef KOKKOS_ARCH_MAXWELL52 */
+/* #undef KOKKOS_ARCH_MAXWELL53 */
+/* #undef KOKKOS_ARCH_PASCAL */
+/* #undef KOKKOS_ARCH_PASCAL60 */
+/* #undef KOKKOS_ARCH_PASCAL61 */
+/* #undef KOKKOS_ARCH_VOLTA */
+/* #undef KOKKOS_ARCH_VOLTA70 */
+/* #undef KOKKOS_ARCH_VOLTA72 */
+/* #undef KOKKOS_ARCH_TURING75 */
+/* #undef KOKKOS_ARCH_AMPERE */
+/* #undef KOKKOS_ARCH_AMPERE80 */
+/* #undef KOKKOS_ARCH_AMPERE86 */
+/* #undef KOKKOS_ARCH_AMD_ZEN */
+/* #undef KOKKOS_ARCH_AMD_ZEN2 */
+/* #undef KOKKOS_ARCH_AMD_ZEN3 */
+/* #undef KOKKOS_ARCH_VEGA */
+/* #undef KOKKOS_ARCH_VEGA900 */
+/* #undef KOKKOS_ARCH_VEGA906 */
+/* #undef KOKKOS_ARCH_VEGA908 */
+/* #undef KOKKOS_ARCH_VEGA90A */
diff --git a/bundled/kokkos-3.7.00/core/src/KokkosExp_InterOp.hpp b/bundled/kokkos-3.7.00/core/src/KokkosExp_InterOp.hpp
new file mode 100644 (file)
index 0000000..0522ad7
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CORE_EXP_INTEROP_HPP
+#define KOKKOS_CORE_EXP_INTEROP_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_INTEROP
+#endif
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_MemoryTraits.hpp>
+#include <Kokkos_View.hpp>
+#include <impl/Kokkos_Utilities.hpp>
+#include <type_traits>
+
+namespace Kokkos {
+namespace Impl {
+
+// ------------------------------------------------------------------ //
+//  this is used to convert
+//      Kokkos::Device<ExecSpace, MemSpace> to MemSpace
+//
+template <typename Tp>
+struct device_memory_space {
+  using type = Tp;
+};
+
+template <typename ExecT, typename MemT>
+struct device_memory_space<Kokkos::Device<ExecT, MemT>> {
+  using type = MemT;
+};
+
+template <typename Tp>
+using device_memory_space_t = typename device_memory_space<Tp>::type;
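+
+// For example (a sketch, assuming the Serial host backend configured for
+// this bundle): the trait strips the execution space from a Device pair and
+// leaves other types untouched.
+//
+//   static_assert(std::is_same<device_memory_space_t<
+//                     Kokkos::Device<Kokkos::Serial, Kokkos::HostSpace>>,
+//                 Kokkos::HostSpace>::value, "");
+//   static_assert(std::is_same<device_memory_space_t<Kokkos::HostSpace>,
+//                              Kokkos::HostSpace>::value, "");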
+
+// ------------------------------------------------------------------ //
+//  this is the impl version which takes a view and converts to python
+//  view type
+//
+template <typename, typename...>
+struct python_view_type_impl;
+
+template <template <typename...> class ViewT, typename ValueT,
+          typename... Types>
+struct python_view_type_impl<ViewT<ValueT>, type_list<Types...>> {
+  using type = ViewT<ValueT, device_memory_space_t<Types>...>;
+};
+
+template <template <typename...> class ViewT, typename ValueT,
+          typename... Types>
+struct python_view_type_impl<ViewT<ValueT, Types...>>
+    : python_view_type_impl<ViewT<ValueT>,
+                            filter_type_list_t<is_default_memory_trait,
+                                               type_list<Types...>, false>> {};
+
+template <typename... T>
+using python_view_type_impl_t = typename python_view_type_impl<T...>::type;
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+template <typename DataType, class... Properties>
+class DynRankView;
+
+namespace Impl {
+
+// Duplicate from the header file for DynRankView to avoid core depending on
+// containers.
+template <class>
+struct is_dyn_rank_view_dup : public std::false_type {};
+
+template <class D, class... P>
+struct is_dyn_rank_view_dup<Kokkos::DynRankView<D, P...>>
+    : public std::true_type {};
+
+}  // namespace Impl
+
+namespace Experimental {
+
+// ------------------------------------------------------------------ //
+//  this is used to extract the uniform type of a view
+//
+template <typename ViewT>
+struct python_view_type {
+  static_assert(
+      Kokkos::is_view<std::decay_t<ViewT>>::value ||
+          Kokkos::Impl::is_dyn_rank_view_dup<std::decay_t<ViewT>>::value,
+      "Error! python_view_type only supports Kokkos::View and "
+      "Kokkos::DynRankView");
+
+  using type =
+      Kokkos::Impl::python_view_type_impl_t<typename ViewT::array_type>;
+};
+
+template <typename ViewT>
+using python_view_type_t = typename python_view_type<ViewT>::type;
+
+template <typename Tp>
+auto as_python_type(Tp&& _v) {
+  using cast_type = python_view_type_t<Tp>;
+  return static_cast<cast_type>(std::forward<Tp>(_v));
+}
+}  // namespace Experimental
+}  // namespace Kokkos
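+
+// A minimal usage sketch (Serial/HostSpace assumed): as_python_type converts
+// a view to its uniform type, where Device<Exec, Mem> collapses to Mem.
+//
+//   Kokkos::View<double*, Kokkos::Device<Kokkos::Serial, Kokkos::HostSpace>>
+//       v("v", 10);
+//   auto pv = Kokkos::Experimental::as_python_type(v);
+//   // decltype(pv) is Kokkos::View<double*, Kokkos::HostSpace>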
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_INTEROP
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_INTEROP
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/KokkosExp_MDRangePolicy.hpp b/bundled/kokkos-3.7.00/core/src/KokkosExp_MDRangePolicy.hpp
new file mode 100644 (file)
index 0000000..64b31c7
--- /dev/null
@@ -0,0 +1,408 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_CORE_EXP_MD_RANGE_POLICY_HPP
+#define KOKKOS_CORE_EXP_MD_RANGE_POLICY_HPP
+
+#include <initializer_list>
+
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_Rank.hpp>
+#include <Kokkos_Array.hpp>
+#include <impl/KokkosExp_Host_IterateTile.hpp>
+#include <Kokkos_ExecPolicy.hpp>
+#include <type_traits>
+
+namespace Kokkos {
+
+// ------------------------------------------------------------------ //
+// Moved to Kokkos_Layout.hpp for more general accessibility
+/*
+enum class Iterate
+{
+  Default, // Default for the device
+  Left,    // Left indices stride fastest
+  Right,   // Right indices stride fastest
+};
+*/
+
+template <typename ExecSpace>
+struct default_outer_direction {
+  using type                     = Iterate;
+  static constexpr Iterate value = Iterate::Right;
+};
+
+template <typename ExecSpace>
+struct default_inner_direction {
+  using type                     = Iterate;
+  static constexpr Iterate value = Iterate::Right;
+};
+
+namespace Impl {
+// NOTE the comparison below is encapsulated to silence warnings about
+// pointless comparison of unsigned integer with zero
+template <class T>
+constexpr std::enable_if_t<!std::is_signed<T>::value, bool>
+is_less_than_value_initialized_variable(T) {
+  return false;
+}
+
+template <class T>
+constexpr std::enable_if_t<std::is_signed<T>::value, bool>
+is_less_than_value_initialized_variable(T arg) {
+  return arg < T{};
+}
+
+// Checked narrowing conversion that calls abort if the cast changes the value
+template <class To, class From>
+constexpr To checked_narrow_cast(From arg) {
+  constexpr const bool is_different_signedness =
+      (std::is_signed<To>::value != std::is_signed<From>::value);
+  auto const ret = static_cast<To>(arg);
+  if (static_cast<From>(ret) != arg ||
+      (is_different_signedness &&
+       is_less_than_value_initialized_variable(arg) !=
+           is_less_than_value_initialized_variable(ret))) {
+    Kokkos::abort("unsafe narrowing conversion");
+  }
+  return ret;
+}
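+// For example, checked_narrow_cast<std::int8_t>(127) returns 127, while
+// checked_narrow_cast<std::int8_t>(200) calls Kokkos::abort: casting the
+// result back to the source type would not reproduce the argument.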
+// NOTE prefer C array U[M] to std::initializer_list<U> so that the number of
+// elements can be deduced (https://stackoverflow.com/q/40241370)
+// NOTE for some unfortunate reason the policy bounds are stored as signed
+// integer arrays (point_type which is Kokkos::Array<std::int64_t>) so we
+// specify the index type (actual policy index_type from the traits) and check
+// ahead of time that narrowing conversions will be safe.
+template <class IndexType, class Array, class U, std::size_t M>
+constexpr Array to_array_potentially_narrowing(const U (&init)[M]) {
+  using T = typename Array::value_type;
+  Array a{};
+  constexpr std::size_t N = a.size();
+  static_assert(M <= N, "");
+  auto* ptr = a.data();
+  // NOTE equivalent to
+  // std::transform(std::begin(init), std::end(init), a.data(),
+  //                [](U x) { return static_cast<T>(x); });
+  // except that std::transform is not constexpr.
+  for (auto x : init) {
+    *ptr++ = checked_narrow_cast<T>(x);
+    (void)checked_narrow_cast<IndexType>(x);  // see note above
+  }
+  return a;
+}
+
+// NOTE Making a copy even when std::is_same<Array, Kokkos::Array<U, M>>::value
+// is true to reduce code complexity.  You may change this if you have a good
+// reason to.  Intentionally not enabling std::array at this time but this may
+// change too.
+template <class IndexType, class NVCC_WONT_LET_ME_CALL_YOU_Array, class U,
+          std::size_t M>
+constexpr NVCC_WONT_LET_ME_CALL_YOU_Array to_array_potentially_narrowing(
+    Kokkos::Array<U, M> const& other) {
+  using T = typename NVCC_WONT_LET_ME_CALL_YOU_Array::value_type;
+  NVCC_WONT_LET_ME_CALL_YOU_Array a{};
+  constexpr std::size_t N = a.size();
+  static_assert(M <= N, "");
+  for (std::size_t i = 0; i < M; ++i) {
+    a[i] = checked_narrow_cast<T>(other[i]);
+    (void)checked_narrow_cast<IndexType>(other[i]);  // see note above
+  }
+  return a;
+}
+
+struct TileSizeProperties {
+  int max_threads;
+  int default_largest_tile_size;
+  int default_tile_size;
+  int max_total_tile_size;
+};
+
+template <typename ExecutionSpace>
+TileSizeProperties get_tile_size_properties(const ExecutionSpace&) {
+  // Host settings
+  TileSizeProperties properties;
+  properties.max_threads               = std::numeric_limits<int>::max();
+  properties.default_largest_tile_size = 0;
+  properties.default_tile_size         = 2;
+  properties.max_total_tile_size       = std::numeric_limits<int>::max();
+  return properties;
+}
+
+}  // namespace Impl
+
+// multi-dimensional iteration pattern
+template <typename... Properties>
+struct MDRangePolicy : public Kokkos::Impl::PolicyTraits<Properties...> {
+  using traits       = Kokkos::Impl::PolicyTraits<Properties...>;
+  using range_policy = RangePolicy<Properties...>;
+
+  typename traits::execution_space m_space;
+
+  using impl_range_policy =
+      RangePolicy<typename traits::execution_space,
+                  typename traits::schedule_type, typename traits::index_type>;
+
+  using execution_policy =
+      MDRangePolicy<Properties...>;  // needed for is_execution_space
+                                     // interrogation
+
+  template <class... OtherProperties>
+  friend struct MDRangePolicy;
+
+  static_assert(!std::is_void<typename traits::iteration_pattern>::value,
+                "Kokkos Error: MD iteration pattern not defined");
+
+  using iteration_pattern = typename traits::iteration_pattern;
+  using work_tag          = typename traits::work_tag;
+  using launch_bounds     = typename traits::launch_bounds;
+  using member_type       = typename range_policy::member_type;
+
+  static constexpr int rank = iteration_pattern::rank;
+
+  using index_type       = typename traits::index_type;
+  using array_index_type = std::int64_t;
+  using point_type = Kokkos::Array<array_index_type, rank>;  // was index_type
+  using tile_type  = Kokkos::Array<array_index_type, rank>;
+  // If point_type or tile_type is not templated on a signed integral type
+  // (i.e. it is unsigned), then a user who passes an initializer_list of
+  // runtime-determined, non-const values of signed integral type will receive
+  // a compiler error due to an invalid case for implicit conversion:
+  // "conversion from integer or unscoped enumeration type to integer type that
+  // cannot represent all values of the original, except where source is a
+  // constant expression whose value can be stored exactly in the target type".
+  // This would require the user to either pass a matching index_type as a
+  // template parameter to the MDRangePolicy or static_cast the individual
+  // values.
+
+  point_type m_lower          = {};
+  point_type m_upper          = {};
+  tile_type m_tile            = {};
+  point_type m_tile_end       = {};
+  index_type m_num_tiles      = 1;
+  index_type m_prod_tile_dims = 1;
+  bool m_tune_tile_size       = false;
+
+  static constexpr auto outer_direction =
+      (iteration_pattern::outer_direction != Iterate::Default)
+          ? iteration_pattern::outer_direction
+          : default_outer_direction<typename traits::execution_space>::value;
+
+  static constexpr auto inner_direction =
+      iteration_pattern::inner_direction != Iterate::Default
+          ? iteration_pattern::inner_direction
+          : default_inner_direction<typename traits::execution_space>::value;
+
+  static constexpr auto Right = Iterate::Right;
+  static constexpr auto Left  = Iterate::Left;
+
+  KOKKOS_INLINE_FUNCTION const typename traits::execution_space& space() const {
+    return m_space;
+  }
+
+  MDRangePolicy() = default;
+
+  template <typename LT, std::size_t LN, typename UT, std::size_t UN,
+            typename TT = array_index_type, std::size_t TN = rank,
+            typename = std::enable_if_t<std::is_integral<LT>::value &&
+                                        std::is_integral<UT>::value &&
+                                        std::is_integral<TT>::value>>
+  MDRangePolicy(const LT (&lower)[LN], const UT (&upper)[UN],
+                const TT (&tile)[TN] = {})
+      : MDRangePolicy(
+            Impl::to_array_potentially_narrowing<index_type, decltype(m_lower)>(
+                lower),
+            Impl::to_array_potentially_narrowing<index_type, decltype(m_upper)>(
+                upper),
+            Impl::to_array_potentially_narrowing<index_type, decltype(m_tile)>(
+                tile)) {
+    static_assert(
+        LN == rank && UN == rank && TN <= rank,
+        "MDRangePolicy: Constructor initializer lists have wrong size");
+  }
+
+  template <typename LT, std::size_t LN, typename UT, std::size_t UN,
+            typename TT = array_index_type, std::size_t TN = rank,
+            typename = std::enable_if_t<std::is_integral<LT>::value &&
+                                        std::is_integral<UT>::value &&
+                                        std::is_integral<TT>::value>>
+  MDRangePolicy(const typename traits::execution_space& work_space,
+                const LT (&lower)[LN], const UT (&upper)[UN],
+                const TT (&tile)[TN] = {})
+      : MDRangePolicy(
+            work_space,
+            Impl::to_array_potentially_narrowing<index_type, decltype(m_lower)>(
+                lower),
+            Impl::to_array_potentially_narrowing<index_type, decltype(m_upper)>(
+                upper),
+            Impl::to_array_potentially_narrowing<index_type, decltype(m_tile)>(
+                tile)) {
+    static_assert(
+        LN == rank && UN == rank && TN <= rank,
+        "MDRangePolicy: Constructor initializer lists have wrong size");
+  }
+
+  // NOTE: Keeping these two constructors, despite the templated constructors
+  // from Kokkos arrays, for backwards compatibility to allow construction from
+  // double-braced initializer lists.
+  MDRangePolicy(point_type const& lower, point_type const& upper,
+                tile_type const& tile = tile_type{})
+      : MDRangePolicy(typename traits::execution_space(), lower, upper, tile) {}
+
+  MDRangePolicy(const typename traits::execution_space& work_space,
+                point_type const& lower, point_type const& upper,
+                tile_type const& tile = tile_type{})
+      : m_space(work_space), m_lower(lower), m_upper(upper), m_tile(tile) {
+    init_helper(Impl::get_tile_size_properties(work_space));
+  }
+
+  template <typename T, std::size_t NT = rank,
+            typename = std::enable_if_t<std::is_integral<T>::value>>
+  MDRangePolicy(Kokkos::Array<T, rank> const& lower,
+                Kokkos::Array<T, rank> const& upper,
+                Kokkos::Array<T, NT> const& tile = Kokkos::Array<T, NT>{})
+      : MDRangePolicy(typename traits::execution_space(), lower, upper, tile) {}
+
+  template <typename T, std::size_t NT = rank,
+            typename = std::enable_if_t<std::is_integral<T>::value>>
+  MDRangePolicy(const typename traits::execution_space& work_space,
+                Kokkos::Array<T, rank> const& lower,
+                Kokkos::Array<T, rank> const& upper,
+                Kokkos::Array<T, NT> const& tile = Kokkos::Array<T, NT>{})
+      : MDRangePolicy(
+            work_space,
+            Impl::to_array_potentially_narrowing<index_type, decltype(m_lower)>(
+                lower),
+            Impl::to_array_potentially_narrowing<index_type, decltype(m_upper)>(
+                upper),
+            Impl::to_array_potentially_narrowing<index_type, decltype(m_tile)>(
+                tile)) {}
+
+  template <class... OtherProperties>
+  MDRangePolicy(const MDRangePolicy<OtherProperties...> p)
+      : traits(p),  // base class may contain data such as desired occupancy
+        m_space(p.m_space),
+        m_lower(p.m_lower),
+        m_upper(p.m_upper),
+        m_tile(p.m_tile),
+        m_tile_end(p.m_tile_end),
+        m_num_tiles(p.m_num_tiles),
+        m_prod_tile_dims(p.m_prod_tile_dims),
+        m_tune_tile_size(p.m_tune_tile_size) {}
+
+  void impl_change_tile_size(const point_type& tile) {
+    m_tile = tile;
+    init_helper(Impl::get_tile_size_properties(m_space));
+  }
+  bool impl_tune_tile_size() const { return m_tune_tile_size; }
+
+ private:
+  void init_helper(Impl::TileSizeProperties properties) {
+    m_prod_tile_dims = 1;
+    int increment    = 1;
+    int rank_start   = 0;
+    int rank_end     = rank;
+    if (inner_direction == Iterate::Right) {
+      increment  = -1;
+      rank_start = rank - 1;
+      rank_end   = -1;
+    }
+    for (int i = rank_start; i != rank_end; i += increment) {
+      const index_type length = m_upper[i] - m_lower[i];
+      if (m_tile[i] <= 0) {
+        m_tune_tile_size = true;
+        if ((inner_direction == Iterate::Right && (i < rank - 1)) ||
+            (inner_direction == Iterate::Left && (i > 0))) {
+          if (m_prod_tile_dims * properties.default_tile_size <
+              static_cast<index_type>(properties.max_total_tile_size)) {
+            m_tile[i] = properties.default_tile_size;
+          } else {
+            m_tile[i] = 1;
+          }
+        } else {
+          m_tile[i] = properties.default_largest_tile_size == 0
+                          ? std::max<int>(length, 1)
+                          : properties.default_largest_tile_size;
+        }
+      }
+      m_tile_end[i] =
+          static_cast<index_type>((length + m_tile[i] - 1) / m_tile[i]);
+      m_num_tiles *= m_tile_end[i];
+      m_prod_tile_dims *= m_tile[i];
+    }
+    if (m_prod_tile_dims > static_cast<index_type>(properties.max_threads)) {
+      printf(" Product of tile dimensions exceed maximum limit: %d\n",
+             static_cast<int>(properties.max_threads));
+      Kokkos::abort(
+          "ExecSpace Error: MDRange tile dims exceed maximum number "
+          "of threads per block - choose smaller tile dims");
+    }
+  }
+};
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+// For backward compatibility
+namespace Kokkos {
+namespace Experimental {
+using Iterate KOKKOS_DEPRECATED = Kokkos::Iterate;
+template <typename... Properties>
+using MDRangePolicy KOKKOS_DEPRECATED = Kokkos::MDRangePolicy<Properties...>;
+template <unsigned N, Kokkos::Iterate OuterDir = Kokkos::Iterate::Default,
+          Kokkos::Iterate InnerDir = Kokkos::Iterate::Default>
+using Rank KOKKOS_DEPRECATED = Kokkos::Rank<N, OuterDir, InnerDir>;
+}  // namespace Experimental
+}  // namespace Kokkos
+#endif
+
+#endif  // KOKKOS_CORE_EXP_MD_RANGE_POLICY_HPP
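+
+// A minimal usage sketch (hypothetical rank-2 view 'a' and extents N, M):
+// iterate a 2D index range with explicit 4x4 tiles.
+//
+//   Kokkos::MDRangePolicy<Kokkos::Rank<2>> policy({0, 0}, {N, M}, {4, 4});
+//   Kokkos::parallel_for("init", policy, KOKKOS_LAMBDA(int i, int j) {
+//     a(i, j) = i + j;
+//   });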
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_AcquireUniqueTokenImpl.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_AcquireUniqueTokenImpl.hpp
new file mode 100644 (file)
index 0000000..4a22aed
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_ACQUIRE_UNIQUE_TOKEN_IMPL_HPP
+#define KOKKOS_ACQUIRE_UNIQUE_TOKEN_IMPL_HPP
+
+#include <Kokkos_Core.hpp>
+#include <Kokkos_UniqueToken.hpp>
+namespace Kokkos {
+namespace Experimental {
+
+template <typename TeamPolicy>
+KOKKOS_FUNCTION AcquireTeamUniqueToken<TeamPolicy>::AcquireTeamUniqueToken(
+    AcquireTeamUniqueToken<TeamPolicy>::token_type t, team_member_type team)
+    : my_token(t), my_team_acquired_val(team.team_scratch(0)), my_team(team) {
+  Kokkos::single(Kokkos::PerTeam(my_team),
+                 [&]() { my_team_acquired_val() = my_token.acquire(); });
+  my_team.team_barrier();
+
+  my_acquired_val = my_team_acquired_val();
+}
+
+template <typename TeamPolicy>
+KOKKOS_FUNCTION AcquireTeamUniqueToken<TeamPolicy>::~AcquireTeamUniqueToken() {
+  my_team.team_barrier();
+  Kokkos::single(Kokkos::PerTeam(my_team),
+                 [&]() { my_token.release(my_acquired_val); });
+  my_team.team_barrier();
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif  // KOKKOS_ACQUIRE_UNIQUE_TOKEN_IMPL_HPP
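+
+// A minimal usage sketch (assumed setup: 'token' is a
+// Kokkos::Experimental::UniqueToken and the TeamPolicy 'policy' reserves
+// AcquireTeamUniqueToken<policy_t>::shmem_size() bytes of team scratch):
+//
+//   Kokkos::parallel_for(policy,
+//       KOKKOS_LAMBDA(const policy_t::member_type& team) {
+//         Kokkos::Experimental::AcquireTeamUniqueToken<policy_t> scoped(
+//             token, team);
+//         const auto id = scoped.value();  // unique per running team
+//       });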
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_AnonymousSpace.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_AnonymousSpace.hpp
new file mode 100644 (file)
index 0000000..6eed92b
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_ANONYMOUSSPACE_HPP
+#define KOKKOS_ANONYMOUSSPACE_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Concepts.hpp>
+#include <cstddef>
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+class AnonymousSpace {
+ public:
+  //! Tag this class as a kokkos memory space
+  using memory_space    = AnonymousSpace;
+  using execution_space = Kokkos::DefaultExecutionSpace;
+  using size_type       = size_t;
+
+  //! This memory space preferred device_type
+  using device_type = Kokkos::Device<execution_space, memory_space>;
+
+  /**\brief  Default memory space instance */
+  AnonymousSpace()                          = default;
+  AnonymousSpace(AnonymousSpace &&rhs)      = default;
+  AnonymousSpace(const AnonymousSpace &rhs) = default;
+  AnonymousSpace &operator=(AnonymousSpace &&) = default;
+  AnonymousSpace &operator=(const AnonymousSpace &) = default;
+  ~AnonymousSpace()                                 = default;
+
+  /**\brief Return Name of the MemorySpace */
+  static constexpr const char *name() { return "Anonymous"; }
+};
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <typename OtherSpace>
+struct MemorySpaceAccess<Kokkos::AnonymousSpace, OtherSpace> {
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+template <typename OtherSpace>
+struct MemorySpaceAccess<OtherSpace, Kokkos::AnonymousSpace> {
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::AnonymousSpace, Kokkos::AnonymousSpace> {
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+#endif  // #define KOKKOS_ANONYMOUSSPACE_HPP
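+
+// A minimal usage sketch: AnonymousSpace erases the concrete memory space, so
+// a view on it can be assigned views from any space (HostSpace assumed here):
+//
+//   Kokkos::View<double*, Kokkos::HostSpace> h("h", 5);
+//   Kokkos::View<double*, Kokkos::AnonymousSpace> a(h);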
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Array.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Array.hpp
new file mode 100644 (file)
index 0000000..e7fec4c
--- /dev/null
@@ -0,0 +1,403 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_ARRAY_HPP
+#define KOKKOS_ARRAY_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ARRAY
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_StringManipulation.hpp>
+
+#include <type_traits>
+#include <algorithm>
+#include <utility>
+#include <limits>
+#include <cstddef>
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
+namespace Impl {
+template <typename Integral, bool Signed = std::is_signed<Integral>::value>
+struct ArrayBoundsCheck;
+
+template <typename Integral>
+struct ArrayBoundsCheck<Integral, true> {
+  KOKKOS_INLINE_FUNCTION
+  constexpr ArrayBoundsCheck(Integral i, size_t N) {
+    if (i < 0) {
+      char err[128] = "Kokkos::Array: index ";
+      to_chars_i(err + strlen(err), err + 128, i);
+      strcat(err, " < 0");
+      Kokkos::abort(err);
+    }
+    ArrayBoundsCheck<Integral, false>(i, N);
+  }
+};
+
+template <typename Integral>
+struct ArrayBoundsCheck<Integral, false> {
+  KOKKOS_INLINE_FUNCTION
+  constexpr ArrayBoundsCheck(Integral i, size_t N) {
+    if (size_t(i) >= N) {
+      char err[128] = "Kokkos::Array: index ";
+      to_chars_i(err + strlen(err), err + 128, i);
+      strcat(err, " >= ");
+      to_chars_i(err + strlen(err), err + 128, N);
+      Kokkos::abort(err);
+    }
+  }
+};
+}  // end namespace Impl
+
+#define KOKKOS_ARRAY_BOUNDS_CHECK(i, N) \
+  Kokkos::Impl::ArrayBoundsCheck<decltype(i)>(i, N)
+
+#else  // !defined( KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK )
+
+#define KOKKOS_ARRAY_BOUNDS_CHECK(i, N) (void)0
+
+#endif  // !defined( KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK )
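+
+// For example, with KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK defined:
+//
+//   Kokkos::Array<int, 3> a = {1, 2, 3};
+//   a[3];  // aborts with "Kokkos::Array: index 3 >= 3"
+//
+// Without it, the macro expands to (void)0 and indexing is unchecked.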
+
+/**\brief  Derived from the C++17 'std::array'.
+ *         Dropping the iterator interface.
+ */
+template <class T = void, size_t N = KOKKOS_INVALID_INDEX, class Proxy = void>
+struct Array {
+ public:
+  /**
+   * The elements of this C array shall not be accessed directly. The data
+   * member has to be declared public to enable aggregate initialization as for
+   * std::array. We mark it as private in the documentation.
+   * @private
+   */
+  T m_internal_implementation_private_member_data[N];
+
+ public:
+  using reference       = T&;
+  using const_reference = std::add_const_t<T>&;
+  using size_type       = size_t;
+  using difference_type = ptrdiff_t;
+  using value_type      = T;
+  using pointer         = T*;
+  using const_pointer   = std::add_const_t<T>*;
+
+  KOKKOS_INLINE_FUNCTION static constexpr size_type size() { return N; }
+  KOKKOS_INLINE_FUNCTION static constexpr bool empty() { return false; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type max_size() const { return N; }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr reference operator[](const iType& i) {
+    static_assert(
+        (std::is_integral<iType>::value || std::is_enum<iType>::value),
+        "Must be integral argument");
+    KOKKOS_ARRAY_BOUNDS_CHECK(i, N);
+    return m_internal_implementation_private_member_data[i];
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr const_reference operator[](
+      const iType& i) const {
+    static_assert(
+        (std::is_integral<iType>::value || std::is_enum<iType>::value),
+        "Must be integral argument");
+    KOKKOS_ARRAY_BOUNDS_CHECK(i, N);
+    return m_internal_implementation_private_member_data[i];
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr pointer data() {
+    return &m_internal_implementation_private_member_data[0];
+  }
+  KOKKOS_INLINE_FUNCTION constexpr const_pointer data() const {
+    return &m_internal_implementation_private_member_data[0];
+  }
+};
+
+template <class T, class Proxy>
+struct Array<T, 0, Proxy> {
+ public:
+  using reference       = T&;
+  using const_reference = std::add_const_t<T>&;
+  using size_type       = size_t;
+  using difference_type = ptrdiff_t;
+  using value_type      = T;
+  using pointer         = T*;
+  using const_pointer   = std::add_const_t<T>*;
+
+  KOKKOS_INLINE_FUNCTION static constexpr size_type size() { return 0; }
+  KOKKOS_INLINE_FUNCTION static constexpr bool empty() { return true; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type max_size() const { return 0; }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION reference operator[](const iType&) {
+    static_assert(
+        (std::is_integral<iType>::value || std::is_enum<iType>::value),
+        "Must be integer argument");
+    Kokkos::abort("Unreachable code");
+    return *reinterpret_cast<pointer>(-1);
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION const_reference operator[](const iType&) const {
+    static_assert(
+        (std::is_integral<iType>::value || std::is_enum<iType>::value),
+        "Must be integer argument");
+    Kokkos::abort("Unreachable code");
+    return *reinterpret_cast<const_pointer>(-1);
+  }
+
+  KOKKOS_INLINE_FUNCTION pointer data() { return pointer(0); }
+  KOKKOS_INLINE_FUNCTION const_pointer data() const { return const_pointer(0); }
+
+  KOKKOS_DEFAULTED_FUNCTION ~Array()            = default;
+  KOKKOS_DEFAULTED_FUNCTION Array()             = default;
+  KOKKOS_DEFAULTED_FUNCTION Array(const Array&) = default;
+  KOKKOS_DEFAULTED_FUNCTION Array& operator=(const Array&) = default;
+
+  // Some supported compilers are not sufficiently C++11 compliant
+  // for default move constructor and move assignment operator.
+  // Array( Array && ) = default ;
+  // Array & operator = ( Array && ) = default ;
+};
+
+template <>
+struct Array<void, KOKKOS_INVALID_INDEX, void> {
+  struct contiguous {};
+  struct strided {};
+};
+
+template <class T>
+struct Array<T, KOKKOS_INVALID_INDEX, Array<>::contiguous> {
+ private:
+  T* m_elem;
+  size_t m_size;
+
+ public:
+  using reference       = T&;
+  using const_reference = std::add_const_t<T>&;
+  using size_type       = size_t;
+  using difference_type = ptrdiff_t;
+  using value_type      = T;
+  using pointer         = T*;
+  using const_pointer   = std::add_const_t<T>*;
+
+  KOKKOS_INLINE_FUNCTION constexpr size_type size() const { return m_size; }
+  KOKKOS_INLINE_FUNCTION constexpr bool empty() const { return 0 == m_size; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type max_size() const { return m_size; }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION reference operator[](const iType& i) {
+    static_assert(
+        (std::is_integral<iType>::value || std::is_enum<iType>::value),
+        "Must be integral argument");
+    KOKKOS_ARRAY_BOUNDS_CHECK(i, m_size);
+    return m_elem[i];
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION const_reference operator[](const iType& i) const {
+    static_assert(
+        (std::is_integral<iType>::value || std::is_enum<iType>::value),
+        "Must be integral argument");
+    KOKKOS_ARRAY_BOUNDS_CHECK(i, m_size);
+    return m_elem[i];
+  }
+
+  KOKKOS_INLINE_FUNCTION pointer data() { return m_elem; }
+  KOKKOS_INLINE_FUNCTION const_pointer data() const { return m_elem; }
+
+  KOKKOS_DEFAULTED_FUNCTION ~Array()                     = default;
+  KOKKOS_INLINE_FUNCTION_DELETED Array()                 = delete;
+  KOKKOS_INLINE_FUNCTION_DELETED Array(const Array& rhs) = delete;
+
+  // Some supported compilers are not sufficiently C++11 compliant
+  // for default move constructor and move assignment operator.
+  // Array( Array && rhs ) = default ;
+  // Array & operator = ( Array && rhs ) = delete ;
+
+  KOKKOS_INLINE_FUNCTION
+  Array& operator=(const Array& rhs) {
+    const size_t n = std::min(m_size, rhs.size());
+    for (size_t i = 0; i < n; ++i) m_elem[i] = rhs[i];
+    return *this;
+  }
+
+  template <size_t N, class P>
+  KOKKOS_INLINE_FUNCTION Array& operator=(const Array<T, N, P>& rhs) {
+    const size_t n = std::min(m_size, rhs.size());
+    for (size_t i = 0; i < n; ++i) m_elem[i] = rhs[i];
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr Array(pointer arg_ptr, size_type arg_size,
+                                         size_type = 0)
+      : m_elem(arg_ptr), m_size(arg_size) {}
+};
+
+template <class T>
+struct Array<T, KOKKOS_INVALID_INDEX, Array<>::strided> {
+ private:
+  T* m_elem;
+  size_t m_size;
+  size_t m_stride;
+
+ public:
+  using reference       = T&;
+  using const_reference = std::add_const_t<T>&;
+  using size_type       = size_t;
+  using difference_type = ptrdiff_t;
+  using value_type      = T;
+  using pointer         = T*;
+  using const_pointer   = std::add_const_t<T>*;
+
+  KOKKOS_INLINE_FUNCTION constexpr size_type size() const { return m_size; }
+  KOKKOS_INLINE_FUNCTION constexpr bool empty() const { return 0 == m_size; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type max_size() const { return m_size; }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION reference operator[](const iType& i) {
+    static_assert(
+        (std::is_integral<iType>::value || std::is_enum<iType>::value),
+        "Must be integral argument");
+    KOKKOS_ARRAY_BOUNDS_CHECK(i, m_size);
+    return m_elem[i * m_stride];
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION const_reference operator[](const iType& i) const {
+    static_assert(
+        (std::is_integral<iType>::value || std::is_enum<iType>::value),
+        "Must be integral argument");
+    KOKKOS_ARRAY_BOUNDS_CHECK(i, m_size);
+    return m_elem[i * m_stride];
+  }
+
+  KOKKOS_INLINE_FUNCTION pointer data() { return m_elem; }
+  KOKKOS_INLINE_FUNCTION const_pointer data() const { return m_elem; }
+
+  KOKKOS_DEFAULTED_FUNCTION ~Array()                 = default;
+  KOKKOS_INLINE_FUNCTION_DELETED Array()             = delete;
+  KOKKOS_INLINE_FUNCTION_DELETED Array(const Array&) = delete;
+
+  // Some supported compilers are not sufficiently C++11 compliant
+  // for default move constructor and move assignment operator.
+  // Array( Array && rhs ) = default ;
+  // Array & operator = ( Array && rhs ) = delete ;
+
+  KOKKOS_INLINE_FUNCTION
+  Array& operator=(const Array& rhs) {
+    const size_t n = std::min(m_size, rhs.size());
+    for (size_t i = 0; i < n; ++i) m_elem[i] = rhs[i];
+    return *this;
+  }
+
+  template <size_t N, class P>
+  KOKKOS_INLINE_FUNCTION Array& operator=(const Array<T, N, P>& rhs) {
+    const size_t n = std::min(m_size, rhs.size());
+    for (size_t i = 0; i < n; ++i) m_elem[i] = rhs[i];
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr Array(pointer arg_ptr, size_type arg_size,
+                                         size_type arg_stride)
+      : m_elem(arg_ptr), m_size(arg_size), m_stride(arg_stride) {}
+};
+
+}  // namespace Kokkos
+
+//<editor-fold desc="Support for structured binding">
+// guarding against bogus error 'specialization in different namespace' with
+// older GCC versions that do not support C++17 anyway
+#if !defined(KOKKOS_COMPILER_GNU) || (KOKKOS_COMPILER_GNU >= 710)
+#if defined(KOKKOS_COMPILER_CLANG) && KOKKOS_COMPILER_CLANG < 800
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmismatched-tags"
+#endif
+template <class T, std::size_t N>
+struct std::tuple_size<Kokkos::Array<T, N>>
+    : std::integral_constant<std::size_t, N> {};
+
+template <std::size_t I, class T, std::size_t N>
+struct std::tuple_element<I, Kokkos::Array<T, N>> {
+  using type = T;
+};
+#if defined(KOKKOS_COMPILER_CLANG) && KOKKOS_COMPILER_CLANG < 800
+#pragma clang diagnostic pop
+#endif
+#endif
+
+namespace Kokkos {
+
+template <std::size_t I, class T, std::size_t N>
+KOKKOS_FUNCTION constexpr T& get(Array<T, N>& a) noexcept {
+  return a[I];
+}
+
+template <std::size_t I, class T, std::size_t N>
+KOKKOS_FUNCTION constexpr T const& get(Array<T, N> const& a) noexcept {
+  return a[I];
+}
+
+template <std::size_t I, class T, std::size_t N>
+KOKKOS_FUNCTION constexpr T&& get(Array<T, N>&& a) noexcept {
+  return std::move(a[I]);
+}
+
+template <std::size_t I, class T, std::size_t N>
+KOKKOS_FUNCTION constexpr T const&& get(Array<T, N> const&& a) noexcept {
+  return std::move(a[I]);
+}
+
+}  // namespace Kokkos
+//</editor-fold>
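+
+// A minimal usage sketch (structured bindings require C++17):
+//
+//   Kokkos::Array<int, 2> p = {1, 2};
+//   auto [x, y] = p;  // x == 1, y == 2, via the get() overloads above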
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ARRAY
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ARRAY
+#endif
+#endif /* #ifndef KOKKOS_ARRAY_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Atomic.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Atomic.hpp
new file mode 100644 (file)
index 0000000..7a2d1c6
--- /dev/null
@@ -0,0 +1,425 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/// \file Kokkos_Atomic.hpp
+/// \brief Atomic functions
+///
+/// This header file defines prototypes for the following atomic functions:
+///   - exchange
+///   - compare and exchange
+///   - add
+///
+/// Supported types include:
+///   - signed and unsigned 4 and 8 byte integers
+///   - float
+///   - double
+///
+/// They are implemented through GCC-compatible intrinsics, OpenMP
+/// directives, and native CUDA intrinsics.
+///
+/// Including this header file requires one of the following
+/// compilers:
+///   - NVCC (for CUDA device code only)
+///   - GCC (for host code only)
+///   - Intel (for host code only)
+///   - A compiler that supports OpenMP 3.1 (for host code only)
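+///
+/// A minimal usage sketch (illustrative only, not part of the upstream
+/// documentation):
+///
+/// \code
+///   int count = 0;
+///   Kokkos::atomic_add(&count, 1);                   // count += 1
+///   int old = Kokkos::atomic_exchange(&count, 42);   // returns the prior value
+///   Kokkos::atomic_compare_exchange(&count, 42, 0);  // if count == 42, set to 0
+/// \endcode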
+
+#ifndef KOKKOS_ATOMIC_HPP
+#define KOKKOS_ATOMIC_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ATOMIC
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+#include <Kokkos_Atomics_Desul_Wrapper.hpp>
+#include <Kokkos_Atomics_Desul_Volatile_Wrapper.hpp>
+#include <impl/Kokkos_Utilities.hpp>
+
+// Helper functions for places where we really should have called SeqCst
+// atomics anyway. These can go away when we call desul unconditionally.
+// Non-desul versions are below.
+namespace Kokkos {
+namespace Impl {
+using desul::MemoryOrderSeqCst;
+using desul::MemoryScopeDevice;
+
+template <class T>
+KOKKOS_INLINE_FUNCTION void desul_atomic_dec(T* dest, MemoryOrderSeqCst,
+                                             MemoryScopeDevice) {
+  return desul::atomic_dec(const_cast<T*>(dest), desul::MemoryOrderSeqCst(),
+                           desul::MemoryScopeDevice());
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION void desul_atomic_inc(T* dest, MemoryOrderSeqCst,
+                                             MemoryScopeDevice) {
+  return desul::atomic_inc(const_cast<T*>(dest), desul::MemoryOrderSeqCst(),
+                           desul::MemoryScopeDevice());
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION T
+desul_atomic_exchange(T* dest, const Kokkos::Impl::identity_t<T> val,
+                      MemoryOrderSeqCst, MemoryScopeDevice) {
+  return desul::atomic_exchange(const_cast<T*>(dest), val,
+                                desul::MemoryOrderSeqCst(),
+                                desul::MemoryScopeDevice());
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION T desul_atomic_compare_exchange(
+    T* dest, Kokkos::Impl::identity_t<const T> compare,
+    Kokkos::Impl::identity_t<const T> val, MemoryOrderSeqCst,
+    MemoryScopeDevice) {
+  return desul::atomic_compare_exchange(dest, compare, val,
+                                        desul::MemoryOrderSeqCst(),
+                                        desul::MemoryScopeDevice());
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+#else
+
+#include <Kokkos_HostSpace.hpp>
+#include <impl/Kokkos_Traits.hpp>
+
+//----------------------------------------------------------------------------
+
+// Need to fix this for pure Clang on Windows.
+#if defined(_WIN32)
+#define KOKKOS_ENABLE_WINDOWS_ATOMICS
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#define KOKKOS_ENABLE_CUDA_ATOMICS
+#if defined(KOKKOS_COMPILER_CLANG)
+#define KOKKOS_ENABLE_GNU_ATOMICS
+#endif
+#endif
+
+#else  // _WIN32
+#if defined(KOKKOS_ENABLE_CUDA)
+
+// When compiling NVIDIA device code, CUDA atomics must be used:
+
+#define KOKKOS_ENABLE_CUDA_ATOMICS
+
+#elif defined(KOKKOS_ENABLE_HIP)
+
+#define KOKKOS_ENABLE_HIP_ATOMICS
+
+#endif
+
+#if !defined(KOKKOS_ENABLE_GNU_ATOMICS) &&    \
+    !defined(KOKKOS_ENABLE_INTEL_ATOMICS) &&  \
+    !defined(KOKKOS_ENABLE_OPENMP_ATOMICS) && \
+    !defined(KOKKOS_ENABLE_STD_ATOMICS) &&    \
+    !defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+
+// A non-CUDA atomic implementation has not been pre-selected.
+// Choose the best implementation for the detected compiler.
+// Preference: GCC, Intel, OpenMP 3.1.
+
+#if defined(KOKKOS_INTERNAL_NOT_PARALLEL)
+
+#define KOKKOS_ENABLE_SERIAL_ATOMICS
+
+#elif defined(KOKKOS_COMPILER_GNU) || defined(KOKKOS_COMPILER_CLANG) || \
+    (defined(KOKKOS_COMPILER_NVCC) || defined(KOKKOS_COMPILER_IBM))
+
+#define KOKKOS_ENABLE_GNU_ATOMICS
+
+#elif defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_CRAYC)
+
+#define KOKKOS_ENABLE_INTEL_ATOMICS
+
+#elif defined(_OPENMP) && (201107 <= _OPENMP)
+
+#define KOKKOS_ENABLE_OPENMP_ATOMICS
+
+#else
+
+#error "KOKKOS_ATOMICS_USE : Unsupported compiler"
+
+#endif
+
+#endif /* Not pre-selected atomic implementation */
+#endif
+
+#ifdef KOKKOS_ENABLE_CUDA
+#include <Cuda/Kokkos_Cuda_Locks.hpp>
+#endif
+
+namespace Kokkos {
+template <typename T>
+KOKKOS_INLINE_FUNCTION void atomic_add(volatile T* const dest, const T src);
+
+// Atomic increment
+template <typename T>
+KOKKOS_INLINE_FUNCTION void atomic_increment(volatile T* a);
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION void atomic_decrement(volatile T* a);
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+inline const char* atomic_query_version() {
+#if defined(KOKKOS_ENABLE_CUDA_ATOMICS)
+  return "KOKKOS_ENABLE_CUDA_ATOMICS";
+#elif defined(KOKKOS_ENABLE_GNU_ATOMICS)
+  return "KOKKOS_ENABLE_GNU_ATOMICS";
+#elif defined(KOKKOS_ENABLE_INTEL_ATOMICS)
+  return "KOKKOS_ENABLE_INTEL_ATOMICS";
+#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
+  return "KOKKOS_ENABLE_OPENMP_ATOMICS";
+#elif defined(KOKKOS_ENABLE_WINDOWS_ATOMICS)
+  return "KOKKOS_ENABLE_WINDOWS_ATOMICS";
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+  return "KOKKOS_ENABLE_SERIAL_ATOMICS";
+#else
+#error "No valid response for atomic_query_version!"
+#endif
+}
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+// Atomic Memory Orders
+//
+// Implements Strongly-typed analogs of C++ standard memory orders
+#include "impl/Kokkos_Atomic_Memory_Order.hpp"
+
+#if defined(KOKKOS_ENABLE_HIP)
+#include <HIP/Kokkos_HIP_Atomic.hpp>
+#endif
+
+#if defined(KOKKOS_ENABLE_WINDOWS_ATOMICS)
+#include "impl/Kokkos_Atomic_Windows.hpp"
+#endif
+//----------------------------------------------------------------------------
+// Atomic Assembly
+//
+// Implements CAS128-bit in assembly
+
+#include "impl/Kokkos_Atomic_Assembly.hpp"
+
+//----------------------------------------------------------------------------
+// Memory fence
+//
+// All loads and stores from this thread will be globally consistent before
+// continuing
+//
+// void memory_fence() {...};
+#include "impl/Kokkos_Memory_Fence.hpp"
+
+//----------------------------------------------------------------------------
+// Atomic exchange
+//
+// template< typename T >
+// T atomic_exchange( volatile T* const dest , const T val )
+// { T tmp = *dest ; *dest = val ; return tmp ; }
+
+#include "impl/Kokkos_Atomic_Exchange.hpp"
+
+//----------------------------------------------------------------------------
+// Atomic compare-and-exchange
+//
+// template<class T>
+// bool atomic_compare_exchange_strong(volatile T* const dest, const T compare,
+// const T val) { bool equal = compare == *dest ; if ( equal ) { *dest = val ; }
+// return equal ; }
+
+#include "impl/Kokkos_Atomic_Compare_Exchange_Strong.hpp"
+
+#include "impl/Kokkos_Atomic_Generic.hpp"
+
+//----------------------------------------------------------------------------
+// Atomic fetch and add
+//
+// template<class T>
+// T atomic_fetch_add(volatile T* const dest, const T val)
+// { T tmp = *dest ; *dest += val ; return tmp ; }
+
+#include "impl/Kokkos_Atomic_Fetch_Add.hpp"
+
+//----------------------------------------------------------------------------
+// Atomic increment
+//
+// template<class T>
+// void atomic_increment(volatile T* const dest)
+// { (*dest)++; }
+
+#include "impl/Kokkos_Atomic_Increment.hpp"
+
+//----------------------------------------------------------------------------
+// Atomic Decrement
+//
+// template<class T>
+// void atomic_decrement(volatile T* const dest)
+// { (*dest)--; }
+
+#include "impl/Kokkos_Atomic_Decrement.hpp"
+
+//----------------------------------------------------------------------------
+// Atomic fetch and sub
+//
+// template<class T>
+// T atomic_fetch_sub(volatile T* const dest, const T val)
+// { T tmp = *dest ; *dest -= val ; return tmp ; }
+
+#include "impl/Kokkos_Atomic_Fetch_Sub.hpp"
+
+//----------------------------------------------------------------------------
+// Atomic fetch and or
+//
+// template<class T>
+// T atomic_fetch_or(volatile T* const dest, const T val)
+// { T tmp = *dest ; *dest = tmp | val ; return tmp ; }
+
+#include "impl/Kokkos_Atomic_Fetch_Or.hpp"
+
+//----------------------------------------------------------------------------
+// Atomic fetch and and
+//
+// template<class T>
+// T atomic_fetch_and(volatile T* const dest, const T val)
+// { T tmp = *dest ; *dest = tmp & val ; return tmp ; }
+
+#include "impl/Kokkos_Atomic_Fetch_And.hpp"
+
+//----------------------------------------------------------------------------
+// Atomic MinMax
+//
+// template<class T>
+// T atomic_min(volatile T* const dest, const T val)
+// { T tmp = *dest ; *dest = min(*dest, val); return tmp ; }
+// template<class T>
+// T atomic_max(volatile T* const dest, const T val)
+// { T tmp = *dest ; *dest = max(*dest, val); return tmp ; }
+
+#include "impl/Kokkos_Atomic_MinMax.hpp"
+
+//----------------------------------------------------------------------------
+// Provide volatile_load and safe_load
+//
+// T volatile_load(T const volatile * const ptr);
+//
+// T const& safe_load(T const * const ptr);
+// On Xeon Phi:
+// T safe_load(T const * const ptr);
+
+#include "impl/Kokkos_Volatile_Load.hpp"
+
+//----------------------------------------------------------------------------
+// Provide atomic loads and stores with memory order semantics
+
+#include "impl/Kokkos_Atomic_Load.hpp"
+#include "impl/Kokkos_Atomic_Store.hpp"
+
+// Generic functions using the above defined functions
+#include "impl/Kokkos_Atomic_Generic_Secondary.hpp"
+//----------------------------------------------------------------------------
+// These atomic-style macros should be inlined functions, not macros
+
+#if defined(KOKKOS_COMPILER_GNU) && !defined(__PGIC__) && \
+    !defined(__CUDA_ARCH__)
+
+#define KOKKOS_NONTEMPORAL_PREFETCH_LOAD(addr) __builtin_prefetch(addr, 0, 0)
+#define KOKKOS_NONTEMPORAL_PREFETCH_STORE(addr) __builtin_prefetch(addr, 1, 0)
+
+#else
+
+#define KOKKOS_NONTEMPORAL_PREFETCH_LOAD(addr) ((void)0)
+#define KOKKOS_NONTEMPORAL_PREFETCH_STORE(addr) ((void)0)
+
+#endif
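+
+// Illustrative sketch (editorial addition, not part of the upstream file):
+// the macros above expand to streaming-prefetch hints on GCC-compatible host
+// compilers and to no-ops elsewhere, e.g. in a copy loop:
+//
+//   for (size_t i = 0; i + 8 < n; ++i) {
+//     KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&src[i + 8]);  // no temporal reuse expected
+//     dst[i] = src[i];
+//   }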
+
+//----------------------------------------------------------------------------
+
+// Helper functions for places where we really should have called SeqCst
+// atomics anyway. These can go away when we call desul unconditionally.
+namespace Kokkos {
+namespace Impl {
+struct MemoryOrderSeqCst {};
+struct MemoryScopeDevice {};
+
+template <class T>
+KOKKOS_INLINE_FUNCTION void desul_atomic_dec(T* dest, MemoryOrderSeqCst,
+                                             MemoryScopeDevice) {
+  return Kokkos::atomic_decrement(dest);
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION void desul_atomic_inc(T* dest, MemoryOrderSeqCst,
+                                             MemoryScopeDevice) {
+  return Kokkos::atomic_increment(dest);
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION T
+desul_atomic_exchange(T* dest, Kokkos::Impl::identity_t<const T> val,
+                      MemoryOrderSeqCst, MemoryScopeDevice) {
+  return Kokkos::atomic_exchange(dest, val);
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION T desul_atomic_compare_exchange(
+    T* dest, Kokkos::Impl::identity_t<const T> compare,
+    Kokkos::Impl::identity_t<const T> val, MemoryOrderSeqCst,
+    MemoryScopeDevice) {
+  return Kokkos::atomic_compare_exchange(dest, compare, val);
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif /* !KOKKOS_ENABLE_IMPL_DESUL_ATOMICS */
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ATOMIC
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_ATOMIC
+#endif
+#endif /* KOKKOS_ATOMIC_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Atomics_Desul_Config.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Atomics_Desul_Config.hpp
new file mode 100644 (file)
index 0000000..ef576d7
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_ATOMICS_DESUL_CONFIG_HPP
+#define KOKKOS_ATOMICS_DESUL_CONFIG_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+#define DESUL_HAVE_OPENMP_ATOMICS
+#endif
+
+#if defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_MAXWELL) || \
+    defined(KOKKOS_ARCH_PASCAL)
+#define DESUL_CUDA_ARCH_IS_PRE_VOLTA
+#endif
+
+#endif  // KOKKOS_ATOMICS_DESUL_CONFIG_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Atomics_Desul_Volatile_Wrapper.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Atomics_Desul_Volatile_Wrapper.hpp
new file mode 100644 (file)
index 0000000..b202ab8
--- /dev/null
@@ -0,0 +1,208 @@
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_DESUL_ATOMICS_VOLATILE_WRAPPER_HPP_
+#define KOKKOS_DESUL_ATOMICS_VOLATILE_WRAPPER_HPP_
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+#include <Kokkos_Atomics_Desul_Config.hpp>
+#include <desul/atomics.hpp>
+
+#ifdef KOKKOS_INTERNAL_NOT_PARALLEL
+#define KOKKOS_DESUL_MEM_SCOPE desul::MemoryScopeCaller()
+#else
+#define KOKKOS_DESUL_MEM_SCOPE desul::MemoryScopeDevice()
+#endif
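+
+// Note (editorial addition, not part of the upstream file): in a serial-only
+// build, atomics only need to be visible to the calling thread
+// (MemoryScopeCaller); otherwise they must be coherent device-wide
+// (MemoryScopeDevice).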
+
+// clang-format off
+namespace Kokkos {
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_load(volatile T* const dest) { return desul::atomic_load(const_cast<T*>(dest), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_store(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_store(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+// atomic_fetch_op
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_add (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_add (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+#ifdef DESUL_IMPL_ATOMIC_CUDA_USE_DOUBLE_ATOMICADD
+KOKKOS_INLINE_FUNCTION
+double atomic_fetch_add(volatile double* const dest, double val) {
+  #ifdef __CUDA_ARCH__
+  return atomicAdd(const_cast<double*>(dest),val);
+  #else
+  return desul::atomic_fetch_add (const_cast<double*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
+  #endif
+}
+
+KOKKOS_INLINE_FUNCTION
+double atomic_fetch_sub(volatile double* const dest, double val) {
+  #ifdef __CUDA_ARCH__
+  return atomicAdd(const_cast<double*>(dest),-val);
+  #else
+  return desul::atomic_fetch_sub (const_cast<double*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
+  #endif
+}
+#endif
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_sub (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_sub (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_max (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_max (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_min (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_min (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_mul (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_mul (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_div (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_div (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_mod (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_mod (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_and (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_and (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_or  (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_or  (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_xor (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_xor (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_nand(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_nand(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_lshift(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_lshift(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_rshift(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_rshift(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_inc(volatile T* const dest) { return desul::atomic_fetch_inc(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_dec(volatile T* const dest) { return desul::atomic_fetch_dec(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+
+// atomic_op_fetch
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_add_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_add_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_sub_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_sub_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_max_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_max_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_min_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_min_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_mul_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_mul_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_div_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_div_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_mod_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_mod_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_and_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_and_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_or_fetch  (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_or_fetch  (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_xor_fetch (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_xor_fetch (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_nand_fetch(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_nand_fetch(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_lshift_fetch(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_lshift_fetch(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_rshift_fetch(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_rshift_fetch(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_inc_fetch(volatile T* const dest) { return desul::atomic_inc_fetch(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_dec_fetch(volatile T* const dest) { return desul::atomic_dec_fetch(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+
+// atomic_op
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_add(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_add (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_sub(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_sub (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_mul(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_mul (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_div(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_div (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_min(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_min (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_max(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_max (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+// FIXME: Desul doesn't have atomic_and yet, so call fetch_and instead
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_and(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { (void) desul::atomic_fetch_and (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+// FIXME: Desul doesn't have atomic_or yet, so call fetch_or instead
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_or (volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { (void) desul::atomic_fetch_or  (const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_inc(volatile T* const dest) { return desul::atomic_inc(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_dec(volatile T* const dest) { return desul::atomic_dec(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_increment(volatile T* const dest) { return desul::atomic_inc(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_decrement(volatile T* const dest) { return desul::atomic_dec(const_cast<T*>(dest),desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+// Exchange
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_exchange(volatile T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_exchange(const_cast<T*>(dest), val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+bool atomic_compare_exchange_strong(volatile T* const dest, T& expected, const T desired) {
+  return desul::atomic_compare_exchange_strong(const_cast<T*>(dest),expected, desired,
+                  desul::MemoryOrderRelaxed(), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
+}
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_compare_exchange(volatile T* const dest, const T compare, const T desired) {
+  return desul::atomic_compare_exchange(const_cast<T*>(dest),compare, desired,
+                  desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
+}
+
+}  // namespace Kokkos
+#undef KOKKOS_DESUL_MEM_SCOPE
+
+// clang-format on
+#endif  // KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Atomics_Desul_Wrapper.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Atomics_Desul_Wrapper.hpp
new file mode 100644 (file)
index 0000000..ed7e8d9
--- /dev/null
@@ -0,0 +1,291 @@
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_DESUL_ATOMICS_WRAPPER_HPP_
+#define KOKKOS_DESUL_ATOMICS_WRAPPER_HPP_
+#include <Kokkos_Macros.hpp>
+
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+#include <Kokkos_Atomics_Desul_Config.hpp>
+#include <desul/atomics.hpp>
+
+#include <impl/Kokkos_Atomic_Memory_Order.hpp>
+#include <impl/Kokkos_Volatile_Load.hpp>
+
+// clang-format off
+namespace Kokkos {
+
+// FIXME: These functions are not used or tested in the unit tests ...
+// ==========================================================
+inline const char* atomic_query_version() { return "KOKKOS_DESUL_ATOMICS"; }
+
+#if defined(KOKKOS_COMPILER_GNU) && !defined(__PGIC__) && \
+    !defined(__CUDA_ARCH__)
+
+#define KOKKOS_NONTEMPORAL_PREFETCH_LOAD(addr) __builtin_prefetch(addr, 0, 0)
+#define KOKKOS_NONTEMPORAL_PREFETCH_STORE(addr) __builtin_prefetch(addr, 1, 0)
+
+#else
+
+#define KOKKOS_NONTEMPORAL_PREFETCH_LOAD(addr) ((void)0)
+#define KOKKOS_NONTEMPORAL_PREFETCH_STORE(addr) ((void)0)
+
+#endif
+// ============================================================
+
+#ifdef KOKKOS_INTERNAL_NOT_PARALLEL
+#define KOKKOS_DESUL_MEM_SCOPE desul::MemoryScopeCaller()
+#else
+#define KOKKOS_DESUL_MEM_SCOPE desul::MemoryScopeDevice()
+#endif
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_load(T* const dest) { return desul::atomic_load(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_store(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_store(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_assign(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { atomic_store(dest,val); }
+
+KOKKOS_INLINE_FUNCTION
+void memory_fence() {
+  desul::atomic_thread_fence(desul::MemoryOrderSeqCst(), KOKKOS_DESUL_MEM_SCOPE);
+}
+
+KOKKOS_INLINE_FUNCTION
+void load_fence() { return desul::atomic_thread_fence(desul::MemoryOrderAcquire(), KOKKOS_DESUL_MEM_SCOPE); }
+
+KOKKOS_INLINE_FUNCTION
+void store_fence() { return desul::atomic_thread_fence(desul::MemoryOrderRelease(), KOKKOS_DESUL_MEM_SCOPE); }
+
+// atomic_fetch_op
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_add (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_add (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+#ifdef DESUL_IMPL_ATOMIC_CUDA_USE_DOUBLE_ATOMICADD
+KOKKOS_INLINE_FUNCTION
+double atomic_fetch_add(double* const dest, double val) {
+  #ifdef __CUDA_ARCH__
+  return atomicAdd(dest,val);
+  #else
+  return desul::atomic_fetch_add (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
+  #endif
+}
+
+KOKKOS_INLINE_FUNCTION
+double atomic_fetch_sub(double* const dest, double val) {
+  #ifdef __CUDA_ARCH__
+  return atomicAdd(dest,-val);
+  #else
+  return desul::atomic_fetch_sub (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
+  #endif
+}
+#endif
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_sub (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_sub (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_max (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_max (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_min (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_min (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_mul (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_mul (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_div (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_div (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_mod (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_mod (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_and (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_and (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_or  (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_or  (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_xor (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_xor (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_nand(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_nand(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_lshift(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_lshift(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_rshift(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_fetch_rshift(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_inc(T* const dest) { return desul::atomic_fetch_inc(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_fetch_dec(T* const dest) { return desul::atomic_fetch_dec(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+
+// atomic_op_fetch
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_add_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_add_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_sub_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_sub_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_max_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_max_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_min_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_min_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_mul_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_mul_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_div_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_div_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_mod_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_mod_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_and_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_and_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_or_fetch  (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_or_fetch  (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_xor_fetch (T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_xor_fetch (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_nand_fetch(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_nand_fetch(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_lshift_fetch(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_lshift_fetch(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_rshift_fetch(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_rshift_fetch(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_inc_fetch(T* const dest) { return desul::atomic_inc_fetch(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_dec_fetch(T* const dest) { return desul::atomic_dec_fetch(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+
+// atomic_op
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_add(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_add (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_sub(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_sub (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_mul(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_mul (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_div(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_div (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_min(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_min (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_max(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_max (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+// FIXME: Desul doesn't have atomic_and yet, so call fetch_and instead
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_and(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { (void) desul::atomic_fetch_and (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+// FIXME: Desul doesn't have atomic_or yet, so call fetch_or instead
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_or(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val)  { (void) desul::atomic_fetch_or (dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_inc(T* const dest) { return desul::atomic_inc(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_dec(T* const dest) { return desul::atomic_dec(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_increment(T* const dest) { return desul::atomic_inc(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+void atomic_decrement(T* const dest) { return desul::atomic_dec(dest, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+// Exchange
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_exchange(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> val) { return desul::atomic_exchange(dest, val, desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE); }
+
+template<class T> KOKKOS_INLINE_FUNCTION
+bool atomic_compare_exchange_strong(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> expected, desul::Impl::dont_deduce_this_parameter_t<const T> desired) {
+  T expected_ref = expected;
+  return desul::atomic_compare_exchange_strong(dest, expected_ref, desired,
+                  desul::MemoryOrderRelaxed(), desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
+}
+
+template<class T> KOKKOS_INLINE_FUNCTION
+T atomic_compare_exchange(T* const dest, desul::Impl::dont_deduce_this_parameter_t<const T> compare, desul::Impl::dont_deduce_this_parameter_t<const T> desired) {
+  return desul::atomic_compare_exchange(dest, compare, desired,
+                  desul::MemoryOrderRelaxed(), KOKKOS_DESUL_MEM_SCOPE);
+}
+
+namespace Impl {
+
+  template<class MemoryOrder>
+  struct KokkosToDesulMemoryOrder;
+
+  template<>
+  struct KokkosToDesulMemoryOrder<memory_order_seq_cst_t> {
+    using type = desul::MemoryOrderSeqCst;
+  };
+  template<>
+  struct KokkosToDesulMemoryOrder<memory_order_acquire_t> {
+    using type = desul::MemoryOrderAcquire;
+  };
+  template<>
+  struct KokkosToDesulMemoryOrder<memory_order_release_t> {
+    using type = desul::MemoryOrderRelease;
+  };
+  template<>
+  struct KokkosToDesulMemoryOrder<memory_order_acq_rel_t> {
+    using type = desul::MemoryOrderAcqRel;
+  };
+  template<>
+  struct KokkosToDesulMemoryOrder<memory_order_relaxed_t> {
+    using type = desul::MemoryOrderRelaxed;
+  };
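+
+  // Illustrative sketch (editorial addition, not part of the upstream file):
+  // the trait above maps Kokkos' strongly typed memory-order tags to their
+  // desul equivalents at compile time, e.g.
+  //
+  //   static_assert(std::is_same<
+  //       KokkosToDesulMemoryOrder<memory_order_relaxed_t>::type,
+  //       desul::MemoryOrderRelaxed>::value, "");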
+  template<class T, class MemOrderSuccess, class MemOrderFailure> KOKKOS_INLINE_FUNCTION
+  bool atomic_compare_exchange_strong(T* const dest, T& expected, const T desired, MemOrderSuccess, MemOrderFailure) {
+    return desul::atomic_compare_exchange_strong(dest, expected, desired,
+                  typename KokkosToDesulMemoryOrder<MemOrderSuccess>::type(),
+                  typename KokkosToDesulMemoryOrder<MemOrderFailure>::type(),
+                  KOKKOS_DESUL_MEM_SCOPE);
+  }
+  template<class T, class MemoryOrder>
+  KOKKOS_INLINE_FUNCTION
+  T atomic_load(const T* const src, MemoryOrder) {
+    return desul::atomic_load(src, typename KokkosToDesulMemoryOrder<MemoryOrder>::type(), KOKKOS_DESUL_MEM_SCOPE);
+  }
+  template<class T, class MemoryOrder>
+  KOKKOS_INLINE_FUNCTION
+  void atomic_store(T* const src, const T val, MemoryOrder) {
+    return desul::atomic_store(src, val, typename KokkosToDesulMemoryOrder<MemoryOrder>::type(), KOKKOS_DESUL_MEM_SCOPE);
+  }
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+#undef KOKKOS_DESUL_MEM_SCOPE
+
+// clang-format on
+#endif  // KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Complex.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Complex.hpp
new file mode 100644 (file)
index 0000000..009c73c
--- /dev/null
@@ -0,0 +1,1004 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+#ifndef KOKKOS_COMPLEX_HPP
+#define KOKKOS_COMPLEX_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_COMPLEX
+#endif
+
+#include <Kokkos_Atomic.hpp>
+#include <Kokkos_MathematicalFunctions.hpp>
+#include <Kokkos_NumericTraits.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <complex>
+#include <type_traits>
+#include <iosfwd>
+
+namespace Kokkos {
+
+/// \class complex
+/// \brief Partial reimplementation of std::complex that works as the
+///   result of a Kokkos::parallel_reduce.
+/// \tparam RealType The type of the real and imaginary parts of the
+///   complex number.  As with std::complex, this is only defined for
+///   \c float, \c double, and <tt>long double</tt>.  The latter is
+///   currently forbidden in CUDA device kernels.
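+///
+/// A minimal usage sketch (illustrative only, not part of the upstream
+/// documentation): a complex-valued reduction, which std::complex cannot
+/// provide in device code.
+///
+/// \code
+///   Kokkos::complex<double> sum;
+///   Kokkos::parallel_reduce(
+///       n, KOKKOS_LAMBDA(const int i, Kokkos::complex<double>& partial) {
+///         partial += Kokkos::complex<double>(1.0 * i, -1.0 * i);
+///       },
+///       sum);
+/// \endcode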
+template <class RealType>
+class
+#ifdef KOKKOS_ENABLE_COMPLEX_ALIGN
+    alignas(2 * sizeof(RealType))
+#endif
+        complex {
+ private:
+  RealType re_{};
+  RealType im_{};
+
+ public:
+  //! The type of the real or imaginary parts of this complex number.
+  using value_type = RealType;
+
+  //! Default constructor (initializes both real and imaginary parts to zero).
+  KOKKOS_DEFAULTED_FUNCTION
+  complex() = default;
+
+  //! Copy constructor.
+  KOKKOS_DEFAULTED_FUNCTION
+  complex(const complex&) noexcept = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  complex& operator=(const complex&) noexcept = default;
+
+  /// \brief Conversion constructor from compatible RType
+  template <
+      class RType,
+      std::enable_if_t<std::is_convertible<RType, RealType>::value, int> = 0>
+  KOKKOS_INLINE_FUNCTION complex(const complex<RType>& other) noexcept
+      // Intentionally do the conversions implicitly here so that users don't
+      // get any warnings about narrowing, etc., that they would expect to get
+      // otherwise.
+      : re_(other.real()), im_(other.imag()) {}
+
+  /// \brief Conversion constructor from std::complex.
+  ///
+  /// This constructor cannot be called in a CUDA device function,
+  /// because std::complex's methods and nonmember functions are not
+  /// marked as CUDA device functions.
+  KOKKOS_INLINE_FUNCTION
+  complex(const std::complex<RealType>& src) noexcept
+      // We can use this aspect of the standard to avoid calling
+      // non-device-marked functions `std::real` and `std::imag`: "For any
+      // object z of type complex<T>, reinterpret_cast<T(&)[2]>(z)[0] is the
+      // real part of z and reinterpret_cast<T(&)[2]>(z)[1] is the imaginary
+      // part of z." Now we don't have to provide a whole bunch of the overloads
+      // of things taking either Kokkos::complex or std::complex
+      : re_(reinterpret_cast<const RealType (&)[2]>(src)[0]),
+        im_(reinterpret_cast<const RealType (&)[2]>(src)[1]) {}
+
+  /// \brief Conversion operator to std::complex.
+  ///
+  /// This operator cannot be called in a CUDA device function,
+  /// because std::complex's methods and nonmember functions are not
+  /// marked as CUDA device functions.
+  // TODO: make explicit.  DJS 2019-08-28
+  operator std::complex<RealType>() const noexcept {
+    return std::complex<RealType>(re_, im_);
+  }
+
+  /// \brief Constructor that takes just the real part, and sets the
+  ///   imaginary part to zero.
+  KOKKOS_INLINE_FUNCTION complex(const RealType& val) noexcept
+      : re_(val), im_(static_cast<RealType>(0)) {}
+
+  //! Constructor that takes the real and imaginary parts.
+  KOKKOS_INLINE_FUNCTION
+  complex(const RealType& re, const RealType& im) noexcept : re_(re), im_(im) {}
+
+  //! Assignment operator (from a real number).
+  KOKKOS_INLINE_FUNCTION complex& operator=(const RealType& val) noexcept {
+    re_ = val;
+    im_ = RealType(0);
+    return *this;
+  }
+
+  /// \brief Assignment operator from std::complex.
+  ///
+  /// This operator cannot be called in a CUDA device function,
+  /// because std::complex's methods and nonmember functions are not
+  /// marked as CUDA device functions.
+  complex& operator=(const std::complex<RealType>& src) noexcept {
+    *this = complex(src);
+    return *this;
+  }
+
+  //! The imaginary part of this complex number.
+  KOKKOS_INLINE_FUNCTION
+  constexpr RealType& imag() noexcept { return im_; }
+
+  //! The real part of this complex number.
+  KOKKOS_INLINE_FUNCTION
+  constexpr RealType& real() noexcept { return re_; }
+
+  //! The imaginary part of this complex number.
+  KOKKOS_INLINE_FUNCTION
+  constexpr RealType imag() const noexcept { return im_; }
+
+  //! The real part of this complex number.
+  KOKKOS_INLINE_FUNCTION
+  constexpr RealType real() const noexcept { return re_; }
+
+  //! Set the imaginary part of this complex number.
+  KOKKOS_INLINE_FUNCTION
+  constexpr void imag(RealType v) noexcept { im_ = v; }
+
+  //! Set the real part of this complex number.
+  KOKKOS_INLINE_FUNCTION
+  constexpr void real(RealType v) noexcept { re_ = v; }
+
+  constexpr KOKKOS_INLINE_FUNCTION complex& operator+=(
+      const complex<RealType>& src) noexcept {
+    re_ += src.re_;
+    im_ += src.im_;
+    return *this;
+  }
+
+  constexpr KOKKOS_INLINE_FUNCTION complex& operator+=(
+      const RealType& src) noexcept {
+    re_ += src;
+    return *this;
+  }
+
+  constexpr KOKKOS_INLINE_FUNCTION complex& operator-=(
+      const complex<RealType>& src) noexcept {
+    re_ -= src.re_;
+    im_ -= src.im_;
+    return *this;
+  }
+
+  constexpr KOKKOS_INLINE_FUNCTION complex& operator-=(
+      const RealType& src) noexcept {
+    re_ -= src;
+    return *this;
+  }
+
+  constexpr KOKKOS_INLINE_FUNCTION complex& operator*=(
+      const complex<RealType>& src) noexcept {
+    const RealType realPart = re_ * src.re_ - im_ * src.im_;
+    const RealType imagPart = re_ * src.im_ + im_ * src.re_;
+    re_                     = realPart;
+    im_                     = imagPart;
+    return *this;
+  }
+
+  constexpr KOKKOS_INLINE_FUNCTION complex& operator*=(
+      const RealType& src) noexcept {
+    re_ *= src;
+    im_ *= src;
+    return *this;
+  }
+
+  // Conditional noexcept, just in case RealType throws on divide-by-zero
+  constexpr KOKKOS_INLINE_FUNCTION complex& operator/=(
+      const complex<RealType>& y) noexcept(noexcept(RealType{} / RealType{})) {
+    // Scale (by the "1-norm" of y) to avoid unwarranted overflow.
+    // If the real part is +/-Inf and the imaginary part is -/+Inf,
+    // this won't change the result.
+    const RealType s = fabs(y.real()) + fabs(y.imag());
+
+    // If s is 0, then y is zero, so x/y == real(x)/0 + i*imag(x)/0.
+    // In that case, the relation x/y == (x/s) / (y/s) doesn't hold,
+    // because y/s is NaN.
+    // TODO mark this branch unlikely
+    if (s == RealType(0)) {
+      this->re_ /= s;
+      this->im_ /= s;
+    } else {
+      const complex x_scaled(this->re_ / s, this->im_ / s);
+      const complex y_conj_scaled(y.re_ / s, -(y.im_) / s);
+      const RealType y_scaled_abs =
+          y_conj_scaled.re_ * y_conj_scaled.re_ +
+          y_conj_scaled.im_ * y_conj_scaled.im_;  // |y/s|^2 (abs(y) == abs(conj(y)))
+      *this = x_scaled * y_conj_scaled;
+      *this /= y_scaled_abs;
+    }
+    return *this;
+  }
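+
+  // Editorial note (not part of the upstream file): the scaling above is the
+  // standard guard against intermediate overflow. For example, with
+  // y = 8e307 + 8e307i the naive |y|^2 = re*re + im*im overflows to Inf,
+  // while s = 1.6e308 is still finite and y/s = 0.5 + 0.5i is well scaled,
+  // so (x/s) * conj(y/s) / |y/s|^2 still recovers x/y.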
+
+  constexpr KOKKOS_INLINE_FUNCTION complex& operator/=(
+      const std::complex<RealType>& y) noexcept(noexcept(RealType{} /
+                                                         RealType{})) {
+    // Scale (by the "1-norm" of y) to avoid unwarranted overflow.
+    // If the real part is +/-Inf and the imaginary part is -/+Inf,
+    // this won't change the result.
+    const RealType s = fabs(y.real()) + fabs(y.imag());
+
+    // If s is 0, then y is zero, so x/y == real(x)/0 + i*imag(x)/0.
+    // In that case, the relation x/y == (x/s) / (y/s) doesn't hold,
+    // because y/s is NaN.
+    if (s == RealType(0)) {
+      this->re_ /= s;
+      this->im_ /= s;
+    } else {
+      const complex x_scaled(this->re_ / s, this->im_ / s);
+      const complex y_conj_scaled(y.real() / s, -(y.imag()) / s);
+      const RealType y_scaled_abs =
+          y_conj_scaled.re_ * y_conj_scaled.re_ +
+          y_conj_scaled.im_ * y_conj_scaled.im_;  // |y/s|^2 (abs(y) == abs(conj(y)))
+      *this = x_scaled * y_conj_scaled;
+      *this /= y_scaled_abs;
+    }
+    return *this;
+  }
+
+  constexpr KOKKOS_INLINE_FUNCTION complex& operator/=(
+      const RealType& src) noexcept(noexcept(RealType{} / RealType{})) {
+    re_ /= src;
+    im_ /= src;
+    return *this;
+  }
+
+  //---------------------------------------------------------------------------
+  // TODO: refactor Kokkos reductions to remove dependency on
+  // volatile member overloads since they are being deprecated in c++20
+  //---------------------------------------------------------------------------
+
+  //! Copy constructor from volatile.
+  template <
+      class RType,
+      std::enable_if_t<std::is_convertible<RType, RealType>::value, int> = 0>
+  KOKKOS_INLINE_FUNCTION complex(const volatile complex<RType>& src) noexcept
+      // Intentionally do the conversions implicitly here so that users don't
+      // get any warnings about narrowing, etc., that they would expect to get
+      // otherwise.
+      : re_(src.re_), im_(src.im_) {}
+
+  /// \brief Assignment operator, for volatile <tt>*this</tt> and
+  ///   nonvolatile input.
+  ///
+  /// \param src [in] Input; right-hand side of the assignment.
+  ///
+  /// This operator returns \c void instead of <tt>volatile
+  /// complex& </tt>.  See Kokkos Issue #177 for the
+  /// explanation.  In practice, this means that you should not chain
+  /// assignments with volatile lvalues.
+  //
+  // Templated, so as not to be a copy assignment operator (Kokkos issue #2577)
+  // Intended to behave as
+  //    void operator=(const complex&) volatile noexcept
+  //
+  // Use cases:
+  //    complex r;
+  //    const complex cr;
+  //    volatile complex vl;
+  //    vl = r;
+  //    vl = cr;
+  template <class Complex,
+            std::enable_if_t<std::is_same<Complex, complex>::value, int> = 0>
+  KOKKOS_INLINE_FUNCTION void operator=(const Complex& src) volatile noexcept {
+    re_ = src.re_;
+    im_ = src.im_;
+    // We deliberately do not return anything here.  See explanation
+    // in public documentation above.
+  }
+
+  //! Assignment operator, volatile LHS and volatile RHS
+  // TODO Should this return void like the other volatile assignment operators?
+  //
+  // Templated, so as not to be a copy assignment operator (Kokkos issue #2577)
+  // Intended to behave as
+  //    volatile complex& operator=(const volatile complex&) volatile noexcept
+  //
+  // Use cases:
+  //    volatile complex vr;
+  //    const volatile complex cvr;
+  //    volatile complex vl;
+  //    vl = vr;
+  //    vl = cvr;
+  template <class Complex,
+            std::enable_if_t<std::is_same<Complex, complex>::value, int> = 0>
+  KOKKOS_INLINE_FUNCTION volatile complex& operator=(
+      const volatile Complex& src) volatile noexcept {
+    re_ = src.re_;
+    im_ = src.im_;
+    return *this;
+  }
+
+  //! Assignment operator, volatile RHS and non-volatile LHS
+  //
+  // Templated, so as not to be a copy assignment operator (Kokkos issue #2577)
+  // Intended to behave as
+  //    complex& operator=(const volatile complex&) noexcept
+  //
+  // Use cases:
+  //    volatile complex vr;
+  //    const volatile complex cvr;
+  //    complex l;
+  //    l = vr;
+  //    l = cvr;
+  //
+  template <class Complex,
+            std::enable_if_t<std::is_same<Complex, complex>::value, int> = 0>
+  KOKKOS_INLINE_FUNCTION complex& operator=(
+      const volatile Complex& src) noexcept {
+    re_ = src.re_;
+    im_ = src.im_;
+    return *this;
+  }
+
+  // The RealType-RHS assignment operators below mirror the behavior of the
+  // complex-RHS versions above.
+
+  //! Assignment operator (from a volatile real number).
+  KOKKOS_INLINE_FUNCTION void operator=(const volatile RealType& val) noexcept {
+    re_ = val;
+    im_ = RealType(0);
+    // We deliberately do not return anything here.  See explanation
+    // in public documentation above.
+  }
+
+  //! Assignment operator volatile LHS and non-volatile RHS
+  KOKKOS_INLINE_FUNCTION complex& operator=(
+      const RealType& val) volatile noexcept {
+    re_ = val;
+    im_ = RealType(0);
+    return *this;
+  }
+
+  //! Assignment operator volatile LHS and volatile RHS
+  // TODO Should this return void like the other volatile assignment operators?
+  KOKKOS_INLINE_FUNCTION complex& operator=(
+      const volatile RealType& val) volatile noexcept {
+    re_ = val;
+    im_ = RealType(0);
+    return *this;
+  }
+
+  //! The imaginary part of this complex number (volatile overload).
+  KOKKOS_INLINE_FUNCTION
+  volatile RealType& imag() volatile noexcept { return im_; }
+
+  //! The real part of this complex number (volatile overload).
+  KOKKOS_INLINE_FUNCTION
+  volatile RealType& real() volatile noexcept { return re_; }
+
+  //! The imaginary part of this complex number (volatile overload).
+  KOKKOS_INLINE_FUNCTION
+  RealType imag() const volatile noexcept { return im_; }
+
+  //! The real part of this complex number (volatile overload).
+  KOKKOS_INLINE_FUNCTION
+  RealType real() const volatile noexcept { return re_; }
+
+  KOKKOS_INLINE_FUNCTION void operator+=(
+      const volatile complex<RealType>& src) volatile noexcept {
+    re_ += src.re_;
+    im_ += src.im_;
+  }
+
+  KOKKOS_INLINE_FUNCTION void operator+=(
+      const volatile RealType& src) volatile noexcept {
+    re_ += src;
+  }
+
+  KOKKOS_INLINE_FUNCTION void operator*=(
+      const volatile complex<RealType>& src) volatile noexcept {
+    const RealType realPart = re_ * src.re_ - im_ * src.im_;
+    const RealType imagPart = re_ * src.im_ + im_ * src.re_;
+
+    re_ = realPart;
+    im_ = imagPart;
+  }
+
+  KOKKOS_INLINE_FUNCTION void operator*=(
+      const volatile RealType& src) volatile noexcept {
+    re_ *= src;
+    im_ *= src;
+  }
+
+  // TODO DSH 2019-10-7 why are there no volatile /= and friends?
+};
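+
+// Editorial illustration (an assumption-labeled sketch, not part of the
+// original header): the volatile overloads above exist so that pre-C++20
+// Kokkos reducers, whose join() historically took volatile references, can
+// accumulate complex values:
+//
+//   KOKKOS_INLINE_FUNCTION
+//   void join(volatile Kokkos::complex<double>& dst,
+//             const volatile Kokkos::complex<double>& src) const {
+//     dst += src;  // resolves to the volatile operator+= defined above
+//   }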
+
+//==============================================================================
+// <editor-fold desc="Equality and inequality"> {{{1
+
+// Note that this is not the same behavior as std::complex, which doesn't
+// allow implicit conversions; but since this is how it behaved before, we
+// keep it this way for backward compatibility.
+
+//! Binary == operator for complex and complex.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION bool operator==(complex<RealType1> const& x,
+                                       complex<RealType2> const& y) noexcept {
+  using common_type = std::common_type_t<RealType1, RealType2>;
+  return common_type(x.real()) == common_type(y.real()) &&
+         common_type(x.imag()) == common_type(y.imag());
+}
+
+// TODO (here and elsewhere) decide if we should convert to a Kokkos::complex
+//      and do the comparison in a device-marked function
+//! Binary == operator for std::complex and complex.
+template <class RealType1, class RealType2>
+inline bool operator==(std::complex<RealType1> const& x,
+                       complex<RealType2> const& y) noexcept {
+  using common_type = std::common_type_t<RealType1, RealType2>;
+  return common_type(x.real()) == common_type(y.real()) &&
+         common_type(x.imag()) == common_type(y.imag());
+}
+
+//! Binary == operator for complex and std::complex.
+template <class RealType1, class RealType2>
+inline bool operator==(complex<RealType1> const& x,
+                       std::complex<RealType2> const& y) noexcept {
+  using common_type = std::common_type_t<RealType1, RealType2>;
+  return common_type(x.real()) == common_type(y.real()) &&
+         common_type(x.imag()) == common_type(y.imag());
+}
+
+//! Binary == operator for complex and real.
+template <
+    class RealType1, class RealType2,
+    // Constraints to avoid participation in operator==() for every possible RHS
+    std::enable_if_t<std::is_convertible<RealType2, RealType1>::value, int> = 0>
+KOKKOS_INLINE_FUNCTION bool operator==(complex<RealType1> const& x,
+                                       RealType2 const& y) noexcept {
+  using common_type = std::common_type_t<RealType1, RealType2>;
+  return common_type(x.real()) == common_type(y) &&
+         common_type(x.imag()) == common_type(0);
+}
+
+//! Binary == operator for real and complex.
+template <
+    class RealType1, class RealType2,
+    // Constraints to avoid participation in operator==() for every possible RHS
+    std::enable_if_t<std::is_convertible<RealType1, RealType2>::value, int> = 0>
+KOKKOS_INLINE_FUNCTION bool operator==(RealType1 const& x,
+                                       complex<RealType2> const& y) noexcept {
+  using common_type = std::common_type_t<RealType1, RealType2>;
+  return common_type(x) == common_type(y.real()) &&
+         common_type(0) == common_type(y.imag());
+}
+
+//! Binary != operator for complex and complex.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION bool operator!=(complex<RealType1> const& x,
+                                       complex<RealType2> const& y) noexcept {
+  using common_type = std::common_type_t<RealType1, RealType2>;
+  return common_type(x.real()) != common_type(y.real()) ||
+         common_type(x.imag()) != common_type(y.imag());
+}
+
+//! Binary != operator for std::complex and complex.
+template <class RealType1, class RealType2>
+inline bool operator!=(std::complex<RealType1> const& x,
+                       complex<RealType2> const& y) noexcept {
+  using common_type = std::common_type_t<RealType1, RealType2>;
+  return common_type(x.real()) != common_type(y.real()) ||
+         common_type(x.imag()) != common_type(y.imag());
+}
+
+//! Binary != operator for complex and std::complex.
+template <class RealType1, class RealType2>
+inline bool operator!=(complex<RealType1> const& x,
+                       std::complex<RealType2> const& y) noexcept {
+  using common_type = std::common_type_t<RealType1, RealType2>;
+  return common_type(x.real()) != common_type(y.real()) ||
+         common_type(x.imag()) != common_type(y.imag());
+}
+
+//! Binary != operator for complex and real.
+template <
+    class RealType1, class RealType2,
+    // Constraints to avoid participation in operator!=() for every possible RHS
+    std::enable_if_t<std::is_convertible<RealType2, RealType1>::value, int> = 0>
+KOKKOS_INLINE_FUNCTION bool operator!=(complex<RealType1> const& x,
+                                       RealType2 const& y) noexcept {
+  using common_type = std::common_type_t<RealType1, RealType2>;
+  return common_type(x.real()) != common_type(y) ||
+         common_type(x.imag()) != common_type(0);
+}
+
+//! Binary != operator for real and complex.
+template <
+    class RealType1, class RealType2,
+    // Constraints to avoid participation in operator!=() for every possible RHS
+    std::enable_if_t<std::is_convertible<RealType1, RealType2>::value, int> = 0>
+KOKKOS_INLINE_FUNCTION bool operator!=(RealType1 const& x,
+                                       complex<RealType2> const& y) noexcept {
+  using common_type = std::common_type_t<RealType1, RealType2>;
+  return common_type(x) != common_type(y.real()) ||
+         common_type(0) != common_type(y.imag());
+}
+
+// </editor-fold> end Equality and inequality }}}1
+//==============================================================================
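+
+// Editorial illustration (not part of the original header): the comparisons
+// above promote both sides to std::common_type_t before comparing, so
+// mixed-precision operands behave consistently:
+//
+//   Kokkos::complex<float>  a(1.0f, 2.0f);
+//   Kokkos::complex<double> b(1.0, 2.0);
+//   assert(a == b);   // both sides are compared as double
+//   assert(b != 1.0); // a real RHS is treated as (1.0, 0.0)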
+
+//! Binary + operator for complex and complex.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION complex<std::common_type_t<RealType1, RealType2>>
+operator+(const complex<RealType1>& x, const complex<RealType2>& y) noexcept {
+  return complex<std::common_type_t<RealType1, RealType2>>(x.real() + y.real(),
+                                                           x.imag() + y.imag());
+}
+
+//! Binary + operator for complex and scalar.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION complex<std::common_type_t<RealType1, RealType2>>
+operator+(const complex<RealType1>& x, const RealType2& y) noexcept {
+  return complex<std::common_type_t<RealType1, RealType2>>(x.real() + y,
+                                                           x.imag());
+}
+
+//! Binary + operator for scalar and complex.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION complex<std::common_type_t<RealType1, RealType2>>
+operator+(const RealType1& x, const complex<RealType2>& y) noexcept {
+  return complex<std::common_type_t<RealType1, RealType2>>(x + y.real(),
+                                                           y.imag());
+}
+
+//! Unary + operator for complex.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION complex<RealType> operator+(
+    const complex<RealType>& x) noexcept {
+  return complex<RealType>{+x.real(), +x.imag()};
+}
+
+//! Binary - operator for complex and complex.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION complex<std::common_type_t<RealType1, RealType2>>
+operator-(const complex<RealType1>& x, const complex<RealType2>& y) noexcept {
+  return complex<std::common_type_t<RealType1, RealType2>>(x.real() - y.real(),
+                                                           x.imag() - y.imag());
+}
+
+//! Binary - operator for complex and scalar.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION complex<std::common_type_t<RealType1, RealType2>>
+operator-(const complex<RealType1>& x, const RealType2& y) noexcept {
+  return complex<std::common_type_t<RealType1, RealType2>>(x.real() - y,
+                                                           x.imag());
+}
+
+//! Binary - operator for scalar and complex.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION complex<std::common_type_t<RealType1, RealType2>>
+operator-(const RealType1& x, const complex<RealType2>& y) noexcept {
+  return complex<std::common_type_t<RealType1, RealType2>>(x - y.real(),
+                                                           -y.imag());
+}
+
+//! Unary - operator for complex.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION complex<RealType> operator-(
+    const complex<RealType>& x) noexcept {
+  return complex<RealType>(-x.real(), -x.imag());
+}
+
+//! Binary * operator for complex and complex.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION complex<std::common_type_t<RealType1, RealType2>>
+operator*(const complex<RealType1>& x, const complex<RealType2>& y) noexcept {
+  return complex<std::common_type_t<RealType1, RealType2>>(
+      x.real() * y.real() - x.imag() * y.imag(),
+      x.real() * y.imag() + x.imag() * y.real());
+}
+
+/// \brief Binary * operator for std::complex and complex.
+///
+/// This needs to exist because template parameters can't be deduced when
+/// conversions occur.  We could probably fix this using the hidden-friend
+/// pattern.
+///
+/// This function cannot be called in a CUDA device function, because
+/// std::complex's methods and nonmember functions are not marked as
+/// CUDA device functions.
+template <class RealType1, class RealType2>
+inline complex<std::common_type_t<RealType1, RealType2>> operator*(
+    const std::complex<RealType1>& x, const complex<RealType2>& y) {
+  return complex<std::common_type_t<RealType1, RealType2>>(
+      x.real() * y.real() - x.imag() * y.imag(),
+      x.real() * y.imag() + x.imag() * y.real());
+}
+
+/// \brief Binary * operator for RealType times complex.
+///
+/// This function exists because the compiler doesn't know that
+/// RealType and complex<RealType> commute with respect to operator*.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION complex<std::common_type_t<RealType1, RealType2>>
+operator*(const RealType1& x, const complex<RealType2>& y) noexcept {
+  return complex<std::common_type_t<RealType1, RealType2>>(x * y.real(),
+                                                           x * y.imag());
+}
+
+/// \brief Binary * operator for complex times RealType.
+///
+/// This function exists because the compiler doesn't know that
+/// RealType and complex<RealType> commute with respect to operator*.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION complex<std::common_type_t<RealType1, RealType2>>
+operator*(const complex<RealType1>& y, const RealType2& x) noexcept {
+  return complex<std::common_type_t<RealType1, RealType2>>(x * y.real(),
+                                                           x * y.imag());
+}
+
+//! Imaginary part of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION RealType imag(const complex<RealType>& x) noexcept {
+  return x.imag();
+}
+
+template <class ArithmeticType>
+KOKKOS_INLINE_FUNCTION constexpr Impl::promote_t<ArithmeticType> imag(
+    ArithmeticType) {
+  return ArithmeticType();
+}
+
+//! Real part of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION RealType real(const complex<RealType>& x) noexcept {
+  return x.real();
+}
+
+template <class ArithmeticType>
+KOKKOS_INLINE_FUNCTION constexpr Impl::promote_t<ArithmeticType> real(
+    ArithmeticType x) {
+  return x;
+}
+
+//! Constructs a complex number from a magnitude and a phase angle.
+template <class T>
+KOKKOS_INLINE_FUNCTION complex<T> polar(const T& r, const T& theta = T()) {
+  KOKKOS_EXPECTS(r >= 0);
+  return complex<T>(r * cos(theta), r * sin(theta));
+}
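+
+// Editorial illustration (not part of the original header):
+//
+//   auto z = Kokkos::polar(2.0, 0.0);  // complex<double>(2.0, 0.0)
+//   // r must be non-negative; KOKKOS_EXPECTS(r >= 0) checks the precondition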
+
+//! Absolute value (magnitude) of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION RealType abs(const complex<RealType>& x) {
+  return hypot(x.real(), x.imag());
+}
+
+//! Power of a complex number.
+template <class T>
+KOKKOS_INLINE_FUNCTION complex<T> pow(const complex<T>& x, const T& y) {
+  T r     = abs(x);
+  T theta = atan2(x.imag(), x.real());
+  return polar(pow(r, y), y * theta);
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION complex<T> pow(const T& x, const complex<T>& y) {
+  return pow(complex<T>(x), y);
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION complex<T> pow(const complex<T>& x,
+                                      const complex<T>& y) {
+  return x == T() ? T() : exp(y * log(x));
+}
+
+template <class T, class U,
+          class = std::enable_if_t<std::is_arithmetic<T>::value>>
+KOKKOS_INLINE_FUNCTION complex<Impl::promote_2_t<T, U>> pow(
+    const T& x, const complex<U>& y) {
+  using type = Impl::promote_2_t<T, U>;
+  return pow(type(x), complex<type>(y));
+}
+
+template <class T, class U,
+          class = std::enable_if_t<std::is_arithmetic<U>::value>>
+KOKKOS_INLINE_FUNCTION complex<Impl::promote_2_t<T, U>> pow(const complex<T>& x,
+                                                            const U& y) {
+  using type = Impl::promote_2_t<T, U>;
+  return pow(complex<type>(x), type(y));
+}
+
+template <class T, class U>
+KOKKOS_INLINE_FUNCTION complex<Impl::promote_2_t<T, U>> pow(
+    const complex<T>& x, const complex<U>& y) {
+  using type = Impl::promote_2_t<T, U>;
+  return pow(complex<type>(x), complex<type>(y));
+}
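+
+// Editorial illustration (not part of the original header): the promote_2_t
+// overloads above let mixed-type calls compile by promoting both arguments
+// to a common floating-point type before dispatching:
+//
+//   Kokkos::complex<float> z(2.0f, 0.0f);
+//   auto w = Kokkos::pow(z, 2.0);  // complex<double>: float and double
+//                                  // promote to double first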
+
+//! Square root of a complex number. This is intended to match the libstdc++
+//! implementation, so that sqrt(z*z) == z for a complex number z.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> sqrt(
+    const complex<RealType>& x) {
+  RealType r = x.real();
+  RealType i = x.imag();
+
+  if (r == RealType()) {
+    RealType t = sqrt(fabs(i) / 2);
+    return Kokkos::complex<RealType>(t, i < RealType() ? -t : t);
+  } else {
+    RealType t = sqrt(2 * (abs(x) + fabs(r)));
+    RealType u = t / 2;
+    return r > RealType() ? Kokkos::complex<RealType>(u, i / t)
+                          : Kokkos::complex<RealType>(fabs(i) / t,
+                                                      i < RealType() ? -u : u);
+  }
+}
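+
+// (Editorial note: the branches above compute the principal square root,
+// i.e. the root with non-negative real part, so e.g.
+// sqrt(complex<double>(-1.0, 0.0)) == complex<double>(0.0, 1.0).)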
+
+//! Conjugate of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION complex<RealType> conj(
+    const complex<RealType>& x) noexcept {
+  return complex<RealType>(real(x), -imag(x));
+}
+
+template <class ArithmeticType>
+KOKKOS_INLINE_FUNCTION constexpr complex<Impl::promote_t<ArithmeticType>> conj(
+    ArithmeticType x) {
+  using type = Impl::promote_t<ArithmeticType>;
+  return complex<type>(x, -type());
+}
+
+//! Exponential of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION complex<RealType> exp(const complex<RealType>& x) {
+  return exp(x.real()) * complex<RealType>(cos(x.imag()), sin(x.imag()));
+}
+
+//! natural log of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> log(
+    const complex<RealType>& x) {
+  RealType phi = atan2(x.imag(), x.real());
+  return Kokkos::complex<RealType>(log(abs(x)), phi);
+}
+
+//! base 10 log of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> log10(
+    const complex<RealType>& x) {
+  return log(x) / log(RealType(10));
+}
+
+//! sine of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> sin(
+    const complex<RealType>& x) {
+  return Kokkos::complex<RealType>(sin(x.real()) * cosh(x.imag()),
+                                   cos(x.real()) * sinh(x.imag()));
+}
+
+//! cosine of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> cos(
+    const complex<RealType>& x) {
+  return Kokkos::complex<RealType>(cos(x.real()) * cosh(x.imag()),
+                                   -sin(x.real()) * sinh(x.imag()));
+}
+
+//! tangent of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> tan(
+    const complex<RealType>& x) {
+  return sin(x) / cos(x);
+}
+
+//! hyperbolic sine of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> sinh(
+    const complex<RealType>& x) {
+  return Kokkos::complex<RealType>(sinh(x.real()) * cos(x.imag()),
+                                   cosh(x.real()) * sin(x.imag()));
+}
+
+//! hyperbolic cosine of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> cosh(
+    const complex<RealType>& x) {
+  return Kokkos::complex<RealType>(cosh(x.real()) * cos(x.imag()),
+                                   sinh(x.real()) * sin(x.imag()));
+}
+
+//! hyperbolic tangent of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> tanh(
+    const complex<RealType>& x) {
+  return sinh(x) / cosh(x);
+}
+
+//! inverse hyperbolic sine of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> asinh(
+    const complex<RealType>& x) {
+  return log(x + sqrt(x * x + RealType(1.0)));
+}
+
+//! inverse hyperbolic cosine of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> acosh(
+    const complex<RealType>& x) {
+  return RealType(2.0) * log(sqrt(RealType(0.5) * (x + RealType(1.0))) +
+                             sqrt(RealType(0.5) * (x - RealType(1.0))));
+}
+
+//! inverse hyperbolic tangent of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> atanh(
+    const complex<RealType>& x) {
+  const RealType i2 = x.imag() * x.imag();
+  const RealType r  = RealType(1.0) - i2 - x.real() * x.real();
+
+  RealType p = RealType(1.0) + x.real();
+  RealType m = RealType(1.0) - x.real();
+
+  p = i2 + p * p;
+  m = i2 + m * m;
+
+  RealType phi = atan2(RealType(2.0) * x.imag(), r);
+  return Kokkos::complex<RealType>(RealType(0.25) * (log(p) - log(m)),
+                                   RealType(0.5) * phi);
+}
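+
+// (Editorial derivation sketch: atanh(z) = (1/2) log((1+z)/(1-z)).  With
+// z = x + iy, |1+z|^2 = (1+x)^2 + y^2 = p and |1-z|^2 = (1-x)^2 + y^2 = m,
+// so the real part is (1/4)(log p - log m); the phase of (1+z)/(1-z) is
+// atan2(2y, 1 - x^2 - y^2) = phi, giving imaginary part phi/2.)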
+
+//! inverse sine of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> asin(
+    const complex<RealType>& x) {
+  Kokkos::complex<RealType> t =
+      asinh(Kokkos::complex<RealType>(-x.imag(), x.real()));
+  return Kokkos::complex<RealType>(t.imag(), -t.real());
+}
+
+//! inverse cosine of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> acos(
+    const complex<RealType>& x) {
+  Kokkos::complex<RealType> t = asin(x);
+  RealType pi_2               = acos(RealType(0.0));
+  return Kokkos::complex<RealType>(pi_2 - t.real(), -t.imag());
+}
+
+//! inverse tangent of a complex number.
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> atan(
+    const complex<RealType>& x) {
+  const RealType r2 = x.real() * x.real();
+  const RealType i  = RealType(1.0) - r2 - x.imag() * x.imag();
+
+  RealType p = x.imag() + RealType(1.0);
+  RealType m = x.imag() - RealType(1.0);
+
+  p = r2 + p * p;
+  m = r2 + m * m;
+
+  return Kokkos::complex<RealType>(
+      RealType(0.5) * atan2(RealType(2.0) * x.real(), i),
+      RealType(0.25) * log(p / m));
+}
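+
+// (Editorial derivation sketch: atan(z) = -i atanh(iz).  Substituting iz
+// into the atanh formula above yields real part atan2(2x, 1 - x^2 - y^2)/2
+// and imaginary part (1/4) log(p/m) with p = x^2 + (y+1)^2 and
+// m = x^2 + (y-1)^2, exactly as computed here.)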
+
+/// This function cannot be called in a CUDA device function,
+/// because std::complex's methods and nonmember functions are not
+/// marked as CUDA device functions.
+template <class RealType>
+inline complex<RealType> exp(const std::complex<RealType>& c) {
+  return complex<RealType>(std::exp(c.real()) * std::cos(c.imag()),
+                           std::exp(c.real()) * std::sin(c.imag()));
+}
+
+//! Binary operator / for complex and real numbers.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION complex<std::common_type_t<RealType1, RealType2>>
+operator/(const complex<RealType1>& x,
+          const RealType2& y) noexcept(noexcept(RealType1{} / RealType2{})) {
+  return complex<std::common_type_t<RealType1, RealType2>>(real(x) / y,
+                                                           imag(x) / y);
+}
+
+//! Binary operator / for complex and complex.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION complex<std::common_type_t<RealType1, RealType2>>
+operator/(const complex<RealType1>& x,
+          const complex<RealType2>& y) noexcept(noexcept(RealType1{} /
+                                                         RealType2{})) {
+  // Scale (by the "1-norm" of y) to avoid unwarranted overflow.
+  // If the real part is +/-Inf and the imaginary part is -/+Inf,
+  // this won't change the result.
+  using common_real_type   = std::common_type_t<RealType1, RealType2>;
+  const common_real_type s = fabs(real(y)) + fabs(imag(y));
+
+  // If s is 0, then y is zero, so x/y == real(x)/0 + i*imag(x)/0.
+  // In that case, the relation x/y == (x/s) / (y/s) doesn't hold,
+  // because y/s is NaN.
+  if (s == 0.0) {
+    return complex<common_real_type>(real(x) / s, imag(x) / s);
+  } else {
+    const complex<common_real_type> x_scaled(real(x) / s, imag(x) / s);
+    const complex<common_real_type> y_conj_scaled(real(y) / s, -imag(y) / s);
+    const RealType1 y_scaled_abs =
+        real(y_conj_scaled) * real(y_conj_scaled) +
+        imag(y_conj_scaled) * imag(y_conj_scaled);  // abs(y) == abs(conj(y))
+    complex<common_real_type> result = x_scaled * y_conj_scaled;
+    result /= y_scaled_abs;
+    return result;
+  }
+}
+
+//! Binary operator / for real and complex numbers.
+template <class RealType1, class RealType2>
+KOKKOS_INLINE_FUNCTION complex<std::common_type_t<RealType1, RealType2>>
+operator/(const RealType1& x,
+          const complex<RealType2>& y) noexcept(noexcept(RealType1{} /
+                                                         RealType2{})) {
+  return complex<std::common_type_t<RealType1, RealType2>>(x) / y;
+}
+
+template <class RealType>
+std::ostream& operator<<(std::ostream& os, const complex<RealType>& x) {
+  const std::complex<RealType> x_std(Kokkos::real(x), Kokkos::imag(x));
+  os << x_std;
+  return os;
+}
+
+template <class RealType>
+std::istream& operator>>(std::istream& is, complex<RealType>& x) {
+  std::complex<RealType> x_std;
+  is >> x_std;
+  x = x_std;  // only assigns on success of above
+  return is;
+}
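+
+// Editorial illustration (not part of the original header): the stream
+// operators round-trip through std::complex, so they use the standard
+// "(re,im)" text form:
+//
+//   Kokkos::complex<double> z(1.0, -2.0);
+//   std::cout << z << '\n';          // prints "(1,-2)"
+//   std::istringstream in("(3,4)");
+//   in >> z;                         // z becomes 3 + 4i on success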
+
+template <class T>
+struct reduction_identity<Kokkos::complex<T>> {
+  using t_red_ident = reduction_identity<T>;
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::complex<T>
+  sum() noexcept {
+    return Kokkos::complex<T>(t_red_ident::sum(), t_red_ident::sum());
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::complex<T>
+  prod() noexcept {
+    return Kokkos::complex<T>(t_red_ident::prod(), t_red_ident::sum());
+  }
+};
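+
+// Editorial illustration (a hedged sketch, not part of the original header;
+// assumes Kokkos is initialized and n is a caller-chosen count): this
+// specialization is what lets Kokkos reductions seed a complex accumulator:
+//
+//   const int n = 1000;
+//   Kokkos::complex<double> sum;
+//   Kokkos::parallel_reduce(
+//       "complex_sum", n,
+//       KOKKOS_LAMBDA(const int i, Kokkos::complex<double>& acc) {
+//         acc += Kokkos::complex<double>(double(i), 1.0);
+//       },
+//       sum);  // each partial result starts at sum() == (0, 0)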
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_COMPLEX
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_COMPLEX
+#endif
+#endif  // KOKKOS_COMPLEX_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Concepts.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Concepts.hpp
new file mode 100644 (file)
index 0000000..63f2b89
--- /dev/null
@@ -0,0 +1,532 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_CORE_CONCEPTS_HPP
+#define KOKKOS_CORE_CONCEPTS_HPP
+
+#include <type_traits>
+
+// Needed for 'is_space<S>::host_mirror_space'
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_DetectionIdiom.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+// Schedules for Execution Policies
+struct Static {};
+struct Dynamic {};
+
+// Schedule Wrapper Type
+template <class T>
+struct Schedule {
+  static_assert(std::is_same<T, Static>::value ||
+                    std::is_same<T, Dynamic>::value,
+                "Kokkos: Invalid Schedule<> type.");
+  using schedule_type = Schedule;
+  using type          = T;
+};
+
+// Specify Iteration Index Type
+template <typename T>
+struct IndexType {
+  static_assert(std::is_integral<T>::value, "Kokkos: Invalid IndexType<>.");
+  using index_type = IndexType;
+  using type       = T;
+};
+
+namespace Experimental {
+struct WorkItemProperty {
+  template <unsigned long Property>
+  struct ImplWorkItemProperty {
+    static const unsigned value = Property;
+    using work_item_property    = ImplWorkItemProperty<Property>;
+  };
+
+  constexpr static const ImplWorkItemProperty<0> None =
+      ImplWorkItemProperty<0>();
+  constexpr static const ImplWorkItemProperty<1> HintLightWeight =
+      ImplWorkItemProperty<1>();
+  constexpr static const ImplWorkItemProperty<2> HintHeavyWeight =
+      ImplWorkItemProperty<2>();
+  constexpr static const ImplWorkItemProperty<4> HintRegular =
+      ImplWorkItemProperty<4>();
+  constexpr static const ImplWorkItemProperty<8> HintIrregular =
+      ImplWorkItemProperty<8>();
+  using None_t            = ImplWorkItemProperty<0>;
+  using HintLightWeight_t = ImplWorkItemProperty<1>;
+  using HintHeavyWeight_t = ImplWorkItemProperty<2>;
+  using HintRegular_t     = ImplWorkItemProperty<4>;
+  using HintIrregular_t   = ImplWorkItemProperty<8>;
+};
+
+template <unsigned long pv1, unsigned long pv2>
+inline constexpr WorkItemProperty::ImplWorkItemProperty<pv1 | pv2> operator|(
+    WorkItemProperty::ImplWorkItemProperty<pv1>,
+    WorkItemProperty::ImplWorkItemProperty<pv2>) {
+  return WorkItemProperty::ImplWorkItemProperty<pv1 | pv2>();
+}
+
+template <unsigned long pv1, unsigned long pv2>
+inline constexpr WorkItemProperty::ImplWorkItemProperty<pv1 & pv2> operator&(
+    WorkItemProperty::ImplWorkItemProperty<pv1>,
+    WorkItemProperty::ImplWorkItemProperty<pv2>) {
+  return WorkItemProperty::ImplWorkItemProperty<pv1 & pv2>();
+}
+
+template <unsigned long pv1, unsigned long pv2>
+inline constexpr bool operator==(WorkItemProperty::ImplWorkItemProperty<pv1>,
+                                 WorkItemProperty::ImplWorkItemProperty<pv2>) {
+  return pv1 == pv2;
+}
+
+}  // namespace Experimental
+
+/**\brief Specify Launch Bounds for CUDA execution.
+ *
+ *  If no launch bounds are specified, none are set.
+ */
+template <unsigned int maxT = 0 /* Max threads per block */
+          ,
+          unsigned int minB = 0 /* Min blocks per SM */
+          >
+struct LaunchBounds {
+  using launch_bounds = LaunchBounds;
+  using type          = LaunchBounds<maxT, minB>;
+  static unsigned int constexpr maxTperB{maxT};
+  static unsigned int constexpr minBperSM{minB};
+};
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+#define KOKKOS_IMPL_IS_CONCEPT(CONCEPT)                        \
+  template <typename T>                                        \
+  struct is_##CONCEPT {                                        \
+   private:                                                    \
+    template <typename U>                                      \
+    using have_t = typename U::CONCEPT;                        \
+    template <typename U>                                      \
+    using have_type_t = typename U::CONCEPT##_type;            \
+                                                               \
+   public:                                                     \
+    static constexpr bool value =                              \
+        std::is_base_of<detected_t<have_t, T>, T>::value ||    \
+        std::is_base_of<detected_t<have_type_t, T>, T>::value; \
+    constexpr operator bool() const noexcept { return value; } \
+  };
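+
+// Editorial illustration (not part of the original header): each generated
+// trait checks for a nested `CONCEPT` or `CONCEPT##_type` alias that names a
+// base of T, so after the expansions below:
+//
+//   static_assert(Kokkos::is_memory_space<Kokkos::HostSpace>::value, "");
+//   static_assert(!Kokkos::is_memory_space<int>::value, "");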
+
+// Public concept:
+
+KOKKOS_IMPL_IS_CONCEPT(memory_space)
+KOKKOS_IMPL_IS_CONCEPT(memory_traits)
+KOKKOS_IMPL_IS_CONCEPT(execution_space)
+KOKKOS_IMPL_IS_CONCEPT(execution_policy)
+KOKKOS_IMPL_IS_CONCEPT(array_layout)
+KOKKOS_IMPL_IS_CONCEPT(reducer)
+namespace Experimental {
+KOKKOS_IMPL_IS_CONCEPT(work_item_property)
+KOKKOS_IMPL_IS_CONCEPT(hooks_policy)
+}  // namespace Experimental
+
+namespace Impl {
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+// For backward compatibility:
+
+template <typename T>
+using is_array_layout KOKKOS_DEPRECATED = Kokkos::is_array_layout<T>;
+template <typename T>
+using is_execution_policy KOKKOS_DEPRECATED = Kokkos::is_execution_policy<T>;
+template <typename T>
+using is_execution_space KOKKOS_DEPRECATED = Kokkos::is_execution_space<T>;
+template <typename T>
+using is_memory_space KOKKOS_DEPRECATED = Kokkos::is_memory_space<T>;
+template <typename T>
+using is_memory_traits KOKKOS_DEPRECATED = Kokkos::is_memory_traits<T>;
+#endif
+
+// Implementation concept:
+
+KOKKOS_IMPL_IS_CONCEPT(thread_team_member)
+KOKKOS_IMPL_IS_CONCEPT(host_thread_team_member)
+KOKKOS_IMPL_IS_CONCEPT(graph_kernel)
+
+}  // namespace Impl
+
+#undef KOKKOS_IMPL_IS_CONCEPT
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+template <class Object>
+class has_member_team_shmem_size {
+  template <typename T>
+  static int32_t test_for_member(decltype(&T::team_shmem_size)) {
+    return int32_t(0);
+  }
+  template <typename T>
+  static int64_t test_for_member(...) {
+    return int64_t(0);
+  }
+
+ public:
+  constexpr static bool value =
+      sizeof(test_for_member<Object>(nullptr)) == sizeof(int32_t);
+};
+
+template <class Object>
+class has_member_shmem_size {
+  template <typename T>
+  static int32_t test_for_member(decltype(&T::shmem_size_me)) {
+    return int32_t(0);
+  }
+  template <typename T>
+  static int64_t test_for_member(...) {
+    return int64_t(0);
+  }
+
+ public:
+  constexpr static bool value =
+      sizeof(test_for_member<Object>(0)) == sizeof(int32_t);
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+template <class ExecutionSpace, class MemorySpace>
+struct Device {
+  static_assert(Kokkos::is_execution_space<ExecutionSpace>::value,
+                "Execution space is not valid");
+  static_assert(Kokkos::is_memory_space<MemorySpace>::value,
+                "Memory space is not valid");
+  using execution_space = ExecutionSpace;
+  using memory_space    = MemorySpace;
+  using device_type     = Device<execution_space, memory_space>;
+};
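+
+// Editorial illustration (not part of the original header): Device pairs an
+// execution space with a compatible memory space, e.g. as a View's device:
+//
+//   using device_t =
+//       Kokkos::Device<Kokkos::DefaultExecutionSpace,
+//                      Kokkos::DefaultExecutionSpace::memory_space>;
+//   Kokkos::View<double*, device_t> v("v", 100);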
+
+namespace Impl {
+
+template <typename T>
+struct is_device_helper : std::false_type {};
+
+template <typename ExecutionSpace, typename MemorySpace>
+struct is_device_helper<Device<ExecutionSpace, MemorySpace>> : std::true_type {
+};
+
+}  // namespace Impl
+
+template <typename T>
+using is_device = typename Impl::is_device_helper<std::remove_cv_t<T>>::type;
+
+//----------------------------------------------------------------------------
+
+template <typename T>
+struct is_space {
+ private:
+  template <typename, typename = void>
+  struct exe : std::false_type {
+    using space = void;
+  };
+
+  template <typename, typename = void>
+  struct mem : std::false_type {
+    using space = void;
+  };
+
+  template <typename, typename = void>
+  struct dev : std::false_type {
+    using space = void;
+  };
+
+  template <typename U>
+  struct exe<U, std::conditional_t<true, void, typename U::execution_space>>
+      : std::is_same<U, typename U::execution_space>::type {
+    using space = typename U::execution_space;
+  };
+
+  template <typename U>
+  struct mem<U, std::conditional_t<true, void, typename U::memory_space>>
+      : std::is_same<U, typename U::memory_space>::type {
+    using space = typename U::memory_space;
+  };
+
+  template <typename U>
+  struct dev<U, std::conditional_t<true, void, typename U::device_type>>
+      : std::is_same<U, typename U::device_type>::type {
+    using space = typename U::device_type;
+  };
+
+  using is_exe = typename is_space<T>::template exe<std::remove_cv_t<T>>;
+  using is_mem = typename is_space<T>::template mem<std::remove_cv_t<T>>;
+  using is_dev = typename is_space<T>::template dev<std::remove_cv_t<T>>;
+
+ public:
+  static constexpr bool value = is_exe::value || is_mem::value || is_dev::value;
+
+  constexpr operator bool() const noexcept { return value; }
+
+  using execution_space = typename is_exe::space;
+  using memory_space    = typename is_mem::space;
+
+  // For backward compatibility, deprecated in favor of
+  // Kokkos::Impl::HostMirror<S>::host_mirror_space
+
+ private:
+  // The actual definitions for host_memory_space and host_execution_spaces are
+  // in do_not_use_host_memory_space and do_not_use_host_execution_space to be
+  // able to use them within this class without deprecation warnings.
+  using do_not_use_host_memory_space = std::conditional_t<
+      std::is_same<memory_space, Kokkos::HostSpace>::value
+#if defined(KOKKOS_ENABLE_CUDA)
+          || std::is_same<memory_space, Kokkos::CudaUVMSpace>::value ||
+          std::is_same<memory_space, Kokkos::CudaHostPinnedSpace>::value
+#elif defined(KOKKOS_ENABLE_HIP)
+          || std::is_same<memory_space,
+                          Kokkos::Experimental::HIPHostPinnedSpace>::value ||
+          std::is_same<memory_space,
+                       Kokkos::Experimental::HIPManagedSpace>::value
+#elif defined(KOKKOS_ENABLE_SYCL)
+          || std::is_same<memory_space,
+                          Kokkos::Experimental::SYCLSharedUSMSpace>::value ||
+          std::is_same<memory_space,
+                       Kokkos::Experimental::SYCLHostUSMSpace>::value
+#endif
+      ,
+      memory_space, Kokkos::HostSpace>;
+
+  using do_not_use_host_execution_space = std::conditional_t<
+#if defined(KOKKOS_ENABLE_CUDA)
+      std::is_same<execution_space, Kokkos::Cuda>::value ||
+#elif defined(KOKKOS_ENABLE_HIP)
+      std::is_same<execution_space, Kokkos::Experimental::HIP>::value ||
+#elif defined(KOKKOS_ENABLE_SYCL)
+      std::is_same<execution_space, Kokkos::Experimental::SYCL>::value ||
+#elif defined(KOKKOS_ENABLE_OPENMPTARGET)
+      std::is_same<execution_space,
+                   Kokkos::Experimental::OpenMPTarget>::value ||
+#endif
+          false,
+      Kokkos::DefaultHostExecutionSpace, execution_space>;
+
+ public:
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  using host_memory_space KOKKOS_DEPRECATED = do_not_use_host_memory_space;
+  using host_execution_space KOKKOS_DEPRECATED =
+      do_not_use_host_execution_space;
+  using host_mirror_space KOKKOS_DEPRECATED = std::conditional_t<
+      std::is_same<execution_space, do_not_use_host_execution_space>::value &&
+          std::is_same<memory_space, do_not_use_host_memory_space>::value,
+      T,
+      Kokkos::Device<do_not_use_host_execution_space,
+                     do_not_use_host_memory_space>>;
+#endif
+};
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+// For backward compatibility
+
+namespace Impl {
+
+template <typename T>
+using is_space KOKKOS_DEPRECATED = Kokkos::is_space<T>;
+
+}
+#endif
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+/**\brief  Access relationship between DstMemorySpace and SrcMemorySpace
+ *
+ *  The default case can assume accessibility for the same space.
+ *  Specializations must be defined for different memory spaces.
+ */
+template <typename DstMemorySpace, typename SrcMemorySpace>
+struct MemorySpaceAccess {
+  static_assert(Kokkos::is_memory_space<DstMemorySpace>::value &&
+                    Kokkos::is_memory_space<SrcMemorySpace>::value,
+                "template arguments must be memory spaces");
+
+  /**\brief  Can a View (or pointer) to memory in SrcMemorySpace
+   *         be assigned to a View (or pointer) to memory marked DstMemorySpace.
+   *
+   *  1. DstMemorySpace::execution_space == SrcMemorySpace::execution_space
+   *  2. All execution spaces that can access DstMemorySpace can also access
+   *     SrcMemorySpace.
+   */
+  enum { assignable = std::is_same<DstMemorySpace, SrcMemorySpace>::value };
+
+  /**\brief  For all DstExecSpace::memory_space == DstMemorySpace
+   *         DstExecSpace can access SrcMemorySpace.
+   */
+  enum { accessible = assignable };
+
+  /**\brief  Does a DeepCopy capability exist
+   *         to DstMemorySpace from SrcMemorySpace
+   */
+  enum { deepcopy = assignable };
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+/**\brief  Can AccessSpace access MemorySpace ?
+ *
+ *   Requires:
+ *     Kokkos::is_space< AccessSpace >::value
+ *     Kokkos::is_memory_space< MemorySpace >::value
+ *
+ *   Can AccessSpace::execution_space access MemorySpace ?
+ *     enum : bool { accessible };
+ *
+ *   Is View<AccessSpace::memory_space> assignable from View<MemorySpace> ?
+ *     enum : bool { assignable };
+ *
 *   If not accessible, then `space` names an intercessory memory space
 *   that should be used to deep copy memory so that
 *     AccessSpace::execution_space
 *   can get access.
+ *   When AccessSpace::memory_space == Kokkos::HostSpace
+ *   then space is the View host mirror space.
+ */
+template <typename AccessSpace, typename MemorySpace>
+struct SpaceAccessibility {
+ private:
+  static_assert(Kokkos::is_space<AccessSpace>::value,
+                "template argument #1 must be a Kokkos space");
+
+  static_assert(Kokkos::is_memory_space<MemorySpace>::value,
+                "template argument #2 must be a Kokkos memory space");
+
+  // The input AccessSpace may be a Device<ExecSpace,MemSpace>;
+  // verify that it is a valid combination of spaces.
+  static_assert(Kokkos::Impl::MemorySpaceAccess<
+                    typename AccessSpace::execution_space::memory_space,
+                    typename AccessSpace::memory_space>::accessible,
+                "template argument #1 is an invalid space");
+
+  using exe_access = Kokkos::Impl::MemorySpaceAccess<
+      typename AccessSpace::execution_space::memory_space, MemorySpace>;
+
+  using mem_access =
+      Kokkos::Impl::MemorySpaceAccess<typename AccessSpace::memory_space,
+                                      MemorySpace>;
+
+ public:
+  /**\brief  Can AccessSpace::execution_space access MemorySpace ?
+   *
+   *  Default based upon memory space accessibility.
+   *  Specialization required for other relationships.
+   */
+  enum { accessible = exe_access::accessible };
+
+  /**\brief  Can assign to AccessSpace from MemorySpace ?
+   *
+   *  Default based upon memory space accessibility.
+   *  Specialization required for other relationships.
+   */
+  enum {
+    assignable = is_memory_space<AccessSpace>::value && mem_access::assignable
+  };
+
+  /**\brief  Can deep copy to AccessSpace::memory_space from MemorySpace ?  */
+  enum { deepcopy = mem_access::deepcopy };
+
+  // Which intercessory space allows AccessSpace::execution_space
+  // to access MemorySpace?
+  // If it is the same memory space, or not accessible at all, use
+  // AccessSpace; otherwise construct a Device from the execution space
+  // and MemorySpace.
+  using space = std::conditional_t<
+      std::is_same<typename AccessSpace::memory_space, MemorySpace>::value ||
+          !exe_access::accessible,
+      AccessSpace,
+      Kokkos::Device<typename AccessSpace::execution_space, MemorySpace>>;
+};
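+
+// Editorial illustration (a hedged sketch, not part of the original header;
+// the helper name check_access is hypothetical): a typical compile-time
+// guard before launching a kernel over a View:
+//
+//   template <class ViewType>
+//   void check_access(const ViewType&) {
+//     static_assert(
+//         Kokkos::SpaceAccessibility<
+//             Kokkos::DefaultExecutionSpace,
+//             typename ViewType::memory_space>::accessible,
+//         "View memory must be reachable from the default execution space");
+//   }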
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+namespace Kokkos {
+namespace Impl {
+
+// For backward compatibility
+template <typename AccessSpace, typename MemorySpace>
+using SpaceAccessibility KOKKOS_DEPRECATED =
+    Kokkos::SpaceAccessibility<AccessSpace, MemorySpace>;
+
+}  // namespace Impl
+}  // namespace Kokkos
+#endif
+
+//----------------------------------------------------------------------------
+
+#endif  // KOKKOS_CORE_CONCEPTS_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_CopyViews.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_CopyViews.hpp
new file mode 100644 (file)
index 0000000..0a66ee9
--- /dev/null
@@ -0,0 +1,3962 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_COPYVIEWS_HPP_
+#define KOKKOS_COPYVIEWS_HPP_
+#include <string>
+#include <Kokkos_Parallel.hpp>
+#include <KokkosExp_MDRangePolicy.hpp>
+#include <Kokkos_Layout.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <class Layout>
+struct ViewFillLayoutSelector {};
+
+template <>
+struct ViewFillLayoutSelector<Kokkos::LayoutLeft> {
+  static const Kokkos::Iterate iterate = Kokkos::Iterate::Left;
+};
+
+template <>
+struct ViewFillLayoutSelector<Kokkos::LayoutRight> {
+  static const Kokkos::Iterate iterate = Kokkos::Iterate::Right;
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+template <class ViewType, class Layout, class ExecSpace, typename iType>
+struct ViewFill<ViewType, Layout, ExecSpace, 0, iType> {
+  using ST = typename ViewType::non_const_value_type;
+  ViewFill(const ViewType& a, const ST& val, const ExecSpace& space) {
+    Kokkos::Impl::DeepCopy<typename ViewType::memory_space, Kokkos::HostSpace,
+                           ExecSpace>(space, a.data(), &val, sizeof(ST));
+  }
+};
+
+template <class ViewType, class Layout, class ExecSpace, typename iType>
+struct ViewFill<ViewType, Layout, ExecSpace, 1, iType> {
+  ViewType a;
+  typename ViewType::const_value_type val;
+  using policy_type = Kokkos::RangePolicy<ExecSpace, Kokkos::IndexType<iType>>;
+
+  ViewFill(const ViewType& a_, typename ViewType::const_value_type& val_,
+           const ExecSpace& space)
+      : a(a_), val(val_) {
+    Kokkos::parallel_for("Kokkos::ViewFill-1D",
+                         policy_type(space, 0, a.extent(0)), *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i) const { a(i) = val; };
+};
+
+template <class ViewType, class Layout, class ExecSpace, typename iType>
+struct ViewFill<ViewType, Layout, ExecSpace, 2, iType> {
+  ViewType a;
+  typename ViewType::const_value_type val;
+
+  using iterate_type = Kokkos::Rank<2, ViewFillLayoutSelector<Layout>::iterate,
+                                    ViewFillLayoutSelector<Layout>::iterate>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+
+  ViewFill(const ViewType& a_, typename ViewType::const_value_type& val_,
+           const ExecSpace& space)
+      : a(a_), val(val_) {
+    Kokkos::parallel_for("Kokkos::ViewFill-2D",
+                         policy_type(space, {0, 0}, {a.extent(0), a.extent(1)}),
+                         *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1) const { a(i0, i1) = val; };
+};
+
+template <class ViewType, class Layout, class ExecSpace, typename iType>
+struct ViewFill<ViewType, Layout, ExecSpace, 3, iType> {
+  ViewType a;
+  typename ViewType::const_value_type val;
+
+  using iterate_type = Kokkos::Rank<3, ViewFillLayoutSelector<Layout>::iterate,
+                                    ViewFillLayoutSelector<Layout>::iterate>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+
+  ViewFill(const ViewType& a_, typename ViewType::const_value_type& val_,
+           const ExecSpace& space)
+      : a(a_), val(val_) {
+    Kokkos::parallel_for(
+        "Kokkos::ViewFill-3D",
+        policy_type(space, {0, 0, 0}, {a.extent(0), a.extent(1), a.extent(2)}),
+        *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1, const iType& i2) const {
+    a(i0, i1, i2) = val;
+  };
+};
+
+template <class ViewType, class Layout, class ExecSpace, typename iType>
+struct ViewFill<ViewType, Layout, ExecSpace, 4, iType> {
+  ViewType a;
+  typename ViewType::const_value_type val;
+
+  using iterate_type = Kokkos::Rank<4, ViewFillLayoutSelector<Layout>::iterate,
+                                    ViewFillLayoutSelector<Layout>::iterate>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+
+  ViewFill(const ViewType& a_, typename ViewType::const_value_type& val_,
+           const ExecSpace& space)
+      : a(a_), val(val_) {
+    Kokkos::parallel_for(
+        "Kokkos::ViewFill-4D",
+        policy_type(space, {0, 0, 0, 0},
+                    {a.extent(0), a.extent(1), a.extent(2), a.extent(3)}),
+        *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1, const iType& i2,
+                  const iType& i3) const {
+    a(i0, i1, i2, i3) = val;
+  };
+};
+
+template <class ViewType, class Layout, class ExecSpace, typename iType>
+struct ViewFill<ViewType, Layout, ExecSpace, 5, iType> {
+  ViewType a;
+  typename ViewType::const_value_type val;
+
+  using iterate_type = Kokkos::Rank<5, ViewFillLayoutSelector<Layout>::iterate,
+                                    ViewFillLayoutSelector<Layout>::iterate>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+
+  ViewFill(const ViewType& a_, typename ViewType::const_value_type& val_,
+           const ExecSpace& space)
+      : a(a_), val(val_) {
+    Kokkos::parallel_for("Kokkos::ViewFill-5D",
+                         policy_type(space, {0, 0, 0, 0, 0},
+                                     {a.extent(0), a.extent(1), a.extent(2),
+                                      a.extent(3), a.extent(4)}),
+                         *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1, const iType& i2,
+                  const iType& i3, const iType& i4) const {
+    a(i0, i1, i2, i3, i4) = val;
+  };
+};
+
+template <class ViewType, class Layout, class ExecSpace, typename iType>
+struct ViewFill<ViewType, Layout, ExecSpace, 6, iType> {
+  ViewType a;
+  typename ViewType::const_value_type val;
+
+  using iterate_type = Kokkos::Rank<6, ViewFillLayoutSelector<Layout>::iterate,
+                                    ViewFillLayoutSelector<Layout>::iterate>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+
+  ViewFill(const ViewType& a_, typename ViewType::const_value_type& val_,
+           const ExecSpace& space)
+      : a(a_), val(val_) {
+    Kokkos::parallel_for("Kokkos::ViewFill-6D",
+                         policy_type(space, {0, 0, 0, 0, 0, 0},
+                                     {a.extent(0), a.extent(1), a.extent(2),
+                                      a.extent(3), a.extent(4), a.extent(5)}),
+                         *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1, const iType& i2,
+                  const iType& i3, const iType& i4, const iType& i5) const {
+    a(i0, i1, i2, i3, i4, i5) = val;
+  };
+};
+
+template <class ViewType, class Layout, class ExecSpace, typename iType>
+struct ViewFill<ViewType, Layout, ExecSpace, 7, iType> {
+  ViewType a;
+  typename ViewType::const_value_type val;
+
+  using iterate_type = Kokkos::Rank<6, ViewFillLayoutSelector<Layout>::iterate,
+                                    ViewFillLayoutSelector<Layout>::iterate>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+
+  ViewFill(const ViewType& a_, typename ViewType::const_value_type& val_,
+           const ExecSpace& space)
+      : a(a_), val(val_) {
+    Kokkos::parallel_for("Kokkos::ViewFill-7D",
+                         policy_type(space, {0, 0, 0, 0, 0, 0},
+                                     {a.extent(0), a.extent(1), a.extent(3),
+                                      a.extent(4), a.extent(5), a.extent(6)}),
+                         *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1, const iType& i3,
+                  const iType& i4, const iType& i5, const iType& i6) const {
+    for (iType i2 = 0; i2 < iType(a.extent(2)); i2++)
+      a(i0, i1, i2, i3, i4, i5, i6) = val;
+  };
+};
+
+template <class ViewType, class Layout, class ExecSpace, typename iType>
+struct ViewFill<ViewType, Layout, ExecSpace, 8, iType> {
+  ViewType a;
+  typename ViewType::const_value_type val;
+
+  using iterate_type = Kokkos::Rank<6, ViewFillLayoutSelector<Layout>::iterate,
+                                    ViewFillLayoutSelector<Layout>::iterate>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+
+  ViewFill(const ViewType& a_, typename ViewType::const_value_type& val_,
+           const ExecSpace& space)
+      : a(a_), val(val_) {
+    Kokkos::parallel_for("Kokkos::ViewFill-8D",
+                         policy_type(space, {0, 0, 0, 0, 0, 0},
+                                     {a.extent(0), a.extent(1), a.extent(3),
+                                      a.extent(5), a.extent(6), a.extent(7)}),
+                         *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1, const iType& i3,
+                  const iType& i5, const iType& i6, const iType& i7) const {
+    for (iType i2 = 0; i2 < iType(a.extent(2)); i2++)
+      for (iType i4 = 0; i4 < iType(a.extent(4)); i4++)
+        a(i0, i1, i2, i3, i4, i5, i6, i7) = val;
+  };
+};
+
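+// Note on the rank-7 and rank-8 ViewFill specializations above:
+// MDRangePolicy supports at most rank 6, so the two highest ranks run six
+// dimensions in parallel and fold the remaining ones into serial loops
+// inside operator(). A non-contiguous fill of a rank-7 view, for example,
+// lands here (illustrative sketch, hypothetical strided view):
+//
+//   Kokkos::View<double*******, Kokkos::LayoutStride> v = /* strided */;
+//   Kokkos::deep_copy(v, 1.0);  // parallel in dims 0,1,3,4,5,6; serial in 2
+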
+template <class ViewTypeA, class ViewTypeB, class Layout, class ExecSpace,
+          typename iType>
+struct ViewCopy<ViewTypeA, ViewTypeB, Layout, ExecSpace, 1, iType> {
+  ViewTypeA a;
+  ViewTypeB b;
+
+  using policy_type = Kokkos::RangePolicy<ExecSpace, Kokkos::IndexType<iType>>;
+  using value_type  = typename ViewTypeA::value_type;
+
+  ViewCopy(const ViewTypeA& a_, const ViewTypeB& b_,
+           const ExecSpace space = ExecSpace())
+      : a(a_), b(b_) {
+    Kokkos::parallel_for("Kokkos::ViewCopy-1D",
+                         policy_type(space, 0, a.extent(0)), *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0) const {
+    a(i0) = static_cast<value_type>(b(i0));
+  };
+};
+
+template <class ViewTypeA, class ViewTypeB, class Layout, class ExecSpace,
+          typename iType>
+struct ViewCopy<ViewTypeA, ViewTypeB, Layout, ExecSpace, 2, iType> {
+  ViewTypeA a;
+  ViewTypeB b;
+  static const Kokkos::Iterate outer_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+  static const Kokkos::Iterate inner_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+  using iterate_type =
+      Kokkos::Rank<2, outer_iteration_pattern, inner_iteration_pattern>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+  using value_type = typename ViewTypeA::value_type;
+
+  ViewCopy(const ViewTypeA& a_, const ViewTypeB& b_,
+           const ExecSpace space = ExecSpace())
+      : a(a_), b(b_) {
+    Kokkos::parallel_for("Kokkos::ViewCopy-2D",
+                         policy_type(space, {0, 0}, {a.extent(0), a.extent(1)}),
+                         *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1) const {
+    a(i0, i1) = static_cast<value_type>(b(i0, i1));
+  };
+};
+
+template <class ViewTypeA, class ViewTypeB, class Layout, class ExecSpace,
+          typename iType>
+struct ViewCopy<ViewTypeA, ViewTypeB, Layout, ExecSpace, 3, iType> {
+  ViewTypeA a;
+  ViewTypeB b;
+
+  static const Kokkos::Iterate outer_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+  static const Kokkos::Iterate inner_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+  using iterate_type =
+      Kokkos::Rank<3, outer_iteration_pattern, inner_iteration_pattern>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+  using value_type = typename ViewTypeA::value_type;
+
+  ViewCopy(const ViewTypeA& a_, const ViewTypeB& b_,
+           const ExecSpace space = ExecSpace())
+      : a(a_), b(b_) {
+    Kokkos::parallel_for(
+        "Kokkos::ViewCopy-3D",
+        policy_type(space, {0, 0, 0}, {a.extent(0), a.extent(1), a.extent(2)}),
+        *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1, const iType& i2) const {
+    a(i0, i1, i2) = static_cast<value_type>(b(i0, i1, i2));
+  };
+};
+
+template <class ViewTypeA, class ViewTypeB, class Layout, class ExecSpace,
+          typename iType>
+struct ViewCopy<ViewTypeA, ViewTypeB, Layout, ExecSpace, 4, iType> {
+  ViewTypeA a;
+  ViewTypeB b;
+
+  static const Kokkos::Iterate outer_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+  static const Kokkos::Iterate inner_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+  using iterate_type =
+      Kokkos::Rank<4, outer_iteration_pattern, inner_iteration_pattern>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+
+  ViewCopy(const ViewTypeA& a_, const ViewTypeB& b_,
+           const ExecSpace space = ExecSpace())
+      : a(a_), b(b_) {
+    Kokkos::parallel_for(
+        "Kokkos::ViewCopy-4D",
+        policy_type(space, {0, 0, 0, 0},
+                    {a.extent(0), a.extent(1), a.extent(2), a.extent(3)}),
+        *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1, const iType& i2,
+                  const iType& i3) const {
+    a(i0, i1, i2, i3) = b(i0, i1, i2, i3);
+  };
+};
+
+template <class ViewTypeA, class ViewTypeB, class Layout, class ExecSpace,
+          typename iType>
+struct ViewCopy<ViewTypeA, ViewTypeB, Layout, ExecSpace, 5, iType> {
+  ViewTypeA a;
+  ViewTypeB b;
+
+  static const Kokkos::Iterate outer_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+  static const Kokkos::Iterate inner_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+  using iterate_type =
+      Kokkos::Rank<5, outer_iteration_pattern, inner_iteration_pattern>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+
+  ViewCopy(const ViewTypeA& a_, const ViewTypeB& b_,
+           const ExecSpace space = ExecSpace())
+      : a(a_), b(b_) {
+    Kokkos::parallel_for("Kokkos::ViewCopy-5D",
+                         policy_type(space, {0, 0, 0, 0, 0},
+                                     {a.extent(0), a.extent(1), a.extent(2),
+                                      a.extent(3), a.extent(4)}),
+                         *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1, const iType& i2,
+                  const iType& i3, const iType& i4) const {
+    a(i0, i1, i2, i3, i4) = b(i0, i1, i2, i3, i4);
+  };
+};
+
+template <class ViewTypeA, class ViewTypeB, class Layout, class ExecSpace,
+          typename iType>
+struct ViewCopy<ViewTypeA, ViewTypeB, Layout, ExecSpace, 6, iType> {
+  ViewTypeA a;
+  ViewTypeB b;
+
+  static const Kokkos::Iterate outer_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+  static const Kokkos::Iterate inner_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+  using iterate_type =
+      Kokkos::Rank<6, outer_iteration_pattern, inner_iteration_pattern>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+
+  ViewCopy(const ViewTypeA& a_, const ViewTypeB& b_,
+           const ExecSpace space = ExecSpace())
+      : a(a_), b(b_) {
+    Kokkos::parallel_for("Kokkos::ViewCopy-6D",
+                         policy_type(space, {0, 0, 0, 0, 0, 0},
+                                     {a.extent(0), a.extent(1), a.extent(2),
+                                      a.extent(3), a.extent(4), a.extent(5)}),
+                         *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1, const iType& i2,
+                  const iType& i3, const iType& i4, const iType& i5) const {
+    a(i0, i1, i2, i3, i4, i5) = b(i0, i1, i2, i3, i4, i5);
+  };
+};
+
+template <class ViewTypeA, class ViewTypeB, class Layout, class ExecSpace,
+          typename iType>
+struct ViewCopy<ViewTypeA, ViewTypeB, Layout, ExecSpace, 7, iType> {
+  ViewTypeA a;
+  ViewTypeB b;
+
+  static const Kokkos::Iterate outer_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+  static const Kokkos::Iterate inner_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+  using iterate_type =
+      Kokkos::Rank<6, outer_iteration_pattern, inner_iteration_pattern>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+
+  ViewCopy(const ViewTypeA& a_, const ViewTypeB& b_,
+           const ExecSpace space = ExecSpace())
+      : a(a_), b(b_) {
+    Kokkos::parallel_for("Kokkos::ViewCopy-7D",
+                         policy_type(space, {0, 0, 0, 0, 0, 0},
+                                     {a.extent(0), a.extent(1), a.extent(3),
+                                      a.extent(4), a.extent(5), a.extent(6)}),
+                         *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1, const iType& i3,
+                  const iType& i4, const iType& i5, const iType& i6) const {
+    for (iType i2 = 0; i2 < iType(a.extent(2)); i2++)
+      a(i0, i1, i2, i3, i4, i5, i6) = b(i0, i1, i2, i3, i4, i5, i6);
+  };
+};
+
+template <class ViewTypeA, class ViewTypeB, class Layout, class ExecSpace,
+          typename iType>
+struct ViewCopy<ViewTypeA, ViewTypeB, Layout, ExecSpace, 8, iType> {
+  ViewTypeA a;
+  ViewTypeB b;
+
+  static const Kokkos::Iterate outer_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::outer_iteration_pattern;
+  static const Kokkos::Iterate inner_iteration_pattern =
+      Kokkos::layout_iterate_type_selector<Layout>::inner_iteration_pattern;
+  using iterate_type =
+      Kokkos::Rank<6, outer_iteration_pattern, inner_iteration_pattern>;
+  using policy_type =
+      Kokkos::MDRangePolicy<ExecSpace, iterate_type, Kokkos::IndexType<iType>>;
+
+  ViewCopy(const ViewTypeA& a_, const ViewTypeB& b_,
+           const ExecSpace space = ExecSpace())
+      : a(a_), b(b_) {
+    Kokkos::parallel_for("Kokkos::ViewCopy-8D",
+                         policy_type(space, {0, 0, 0, 0, 0, 0},
+                                     {a.extent(0), a.extent(1), a.extent(3),
+                                      a.extent(5), a.extent(6), a.extent(7)}),
+                         *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const iType& i0, const iType& i1, const iType& i3,
+                  const iType& i5, const iType& i6, const iType& i7) const {
+    for (iType i2 = 0; i2 < iType(a.extent(2)); i2++)
+      for (iType i4 = 0; i4 < iType(a.extent(4)); i4++)
+        a(i0, i1, i2, i3, i4, i5, i6, i7) = b(i0, i1, i2, i3, i4, i5, i6, i7);
+  };
+};
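+
+// The ViewCopy specializations mirror ViewFill: ranks 1-6 map directly onto
+// a RangePolicy/MDRangePolicy whose iteration order comes from the Layout
+// template argument (selected from the destination's layout by the
+// view_copy dispatch below), while ranks 7 and 8 fold the extra dimensions
+// into serial loops. Illustrative sketch (hypothetical views):
+//
+//   Kokkos::View<int**, Kokkos::LayoutLeft>  a("a", 100, 100);
+//   Kokkos::View<int**, Kokkos::LayoutRight> b("b", 100, 100);
+//   Kokkos::deep_copy(a, b);  // layouts differ: element-wise ViewCopy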
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+template <class ExecutionSpace, class DstType, class SrcType>
+void view_copy(const ExecutionSpace& space, const DstType& dst,
+               const SrcType& src) {
+  using dst_memory_space = typename DstType::memory_space;
+  using src_memory_space = typename SrcType::memory_space;
+
+  enum {
+    ExecCanAccessSrc =
+        Kokkos::SpaceAccessibility<ExecutionSpace, src_memory_space>::accessible
+  };
+  enum {
+    ExecCanAccessDst =
+        Kokkos::SpaceAccessibility<ExecutionSpace, dst_memory_space>::accessible
+  };
+
+  if (!(ExecCanAccessSrc && ExecCanAccessDst)) {
+    Kokkos::Impl::throw_runtime_exception(
+        "Kokkos::Impl::view_copy called with invalid execution space");
+  } else {
+    // Figure out iteration order in case we need it
+    int64_t strides[DstType::Rank + 1];
+    dst.stride(strides);
+    Kokkos::Iterate iterate;
+    if (Kokkos::is_layouttiled<typename DstType::array_layout>::value) {
+      iterate = Kokkos::layout_iterate_type_selector<
+          typename DstType::array_layout>::outer_iteration_pattern;
+    } else if (std::is_same<typename DstType::array_layout,
+                            Kokkos::LayoutRight>::value) {
+      iterate = Kokkos::Iterate::Right;
+    } else if (std::is_same<typename DstType::array_layout,
+                            Kokkos::LayoutLeft>::value) {
+      iterate = Kokkos::Iterate::Left;
+    } else if (std::is_same<typename DstType::array_layout,
+                            Kokkos::LayoutStride>::value) {
+      if (strides[0] > strides[DstType::Rank - 1])
+        iterate = Kokkos::Iterate::Right;
+      else
+        iterate = Kokkos::Iterate::Left;
+    } else {
+      if (std::is_same<typename DstType::execution_space::array_layout,
+                       Kokkos::LayoutRight>::value)
+        iterate = Kokkos::Iterate::Right;
+      else
+        iterate = Kokkos::Iterate::Left;
+    }
+
+    if ((dst.span() >= size_t(std::numeric_limits<int>::max())) ||
+        (src.span() >= size_t(std::numeric_limits<int>::max()))) {
+      if (iterate == Kokkos::Iterate::Right)
+        Kokkos::Impl::ViewCopy<
+            typename DstType::uniform_runtime_nomemspace_type,
+            typename SrcType::uniform_runtime_const_nomemspace_type,
+            Kokkos::LayoutRight, ExecutionSpace, DstType::Rank, int64_t>(
+            dst, src, space);
+      else
+        Kokkos::Impl::ViewCopy<
+            typename DstType::uniform_runtime_nomemspace_type,
+            typename SrcType::uniform_runtime_const_nomemspace_type,
+            Kokkos::LayoutLeft, ExecutionSpace, DstType::Rank, int64_t>(
+            dst, src, space);
+    } else {
+      if (iterate == Kokkos::Iterate::Right)
+        Kokkos::Impl::ViewCopy<
+            typename DstType::uniform_runtime_nomemspace_type,
+            typename SrcType::uniform_runtime_const_nomemspace_type,
+            Kokkos::LayoutRight, ExecutionSpace, DstType::Rank, int>(dst, src,
+                                                                     space);
+      else
+        Kokkos::Impl::ViewCopy<
+            typename DstType::uniform_runtime_nomemspace_type,
+            typename SrcType::uniform_runtime_const_nomemspace_type,
+            Kokkos::LayoutLeft, ExecutionSpace, DstType::Rank, int>(dst, src,
+                                                                    space);
+    }
+  }
+}
+
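+// One way the overload above is reached, via the execution-space form of
+// deep_copy when the views cannot be copied as one contiguous block
+// (illustrative sketch, hypothetical views):
+//
+//   Kokkos::DefaultExecutionSpace exec;
+//   Kokkos::View<double**, Kokkos::LayoutRight> dst("dst", 10, 10);
+//   Kokkos::View<double**, Kokkos::LayoutLeft>  src("src", 10, 10);
+//   Kokkos::deep_copy(exec, dst, src);  // forwards to view_copy(exec, ...)
+//
+// The index type widens to int64_t only when a span does not fit in int;
+// 32-bit indexing is typically cheaper on GPUs.
+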
+template <class DstType, class SrcType>
+void view_copy(const DstType& dst, const SrcType& src) {
+  using dst_execution_space = typename DstType::execution_space;
+  using src_execution_space = typename SrcType::execution_space;
+  using dst_memory_space    = typename DstType::memory_space;
+  using src_memory_space    = typename SrcType::memory_space;
+
+  enum {
+    DstExecCanAccessSrc =
+        Kokkos::SpaceAccessibility<dst_execution_space,
+                                   src_memory_space>::accessible
+  };
+
+  enum {
+    SrcExecCanAccessDst =
+        Kokkos::SpaceAccessibility<src_execution_space,
+                                   dst_memory_space>::accessible
+  };
+
+  if (!DstExecCanAccessSrc && !SrcExecCanAccessDst) {
+    std::string message(
+        "Error: Kokkos::deep_copy with no available copy mechanism: ");
+    message += src.label();
+    message += " to ";
+    message += dst.label();
+    Kokkos::Impl::throw_runtime_exception(message);
+  }
+
+  // Figure out iteration order in case we need it
+  int64_t strides[DstType::Rank + 1];
+  dst.stride(strides);
+  Kokkos::Iterate iterate;
+  if (Kokkos::is_layouttiled<typename DstType::array_layout>::value) {
+    iterate = Kokkos::layout_iterate_type_selector<
+        typename DstType::array_layout>::outer_iteration_pattern;
+  } else if (std::is_same<typename DstType::array_layout,
+                          Kokkos::LayoutRight>::value) {
+    iterate = Kokkos::Iterate::Right;
+  } else if (std::is_same<typename DstType::array_layout,
+                          Kokkos::LayoutLeft>::value) {
+    iterate = Kokkos::Iterate::Left;
+  } else if (std::is_same<typename DstType::array_layout,
+                          Kokkos::LayoutStride>::value) {
+    if (strides[0] > strides[DstType::Rank - 1])
+      iterate = Kokkos::Iterate::Right;
+    else
+      iterate = Kokkos::Iterate::Left;
+  } else {
+    if (std::is_same<typename DstType::execution_space::array_layout,
+                     Kokkos::LayoutRight>::value)
+      iterate = Kokkos::Iterate::Right;
+    else
+      iterate = Kokkos::Iterate::Left;
+  }
+
+  if ((dst.span() >= size_t(std::numeric_limits<int>::max())) ||
+      (src.span() >= size_t(std::numeric_limits<int>::max()))) {
+    if (DstExecCanAccessSrc) {
+      if (iterate == Kokkos::Iterate::Right)
+        Kokkos::Impl::ViewCopy<
+            typename DstType::uniform_runtime_nomemspace_type,
+            typename SrcType::uniform_runtime_const_nomemspace_type,
+            Kokkos::LayoutRight, dst_execution_space, DstType::Rank, int64_t>(
+            dst, src);
+      else
+        Kokkos::Impl::ViewCopy<
+            typename DstType::uniform_runtime_nomemspace_type,
+            typename SrcType::uniform_runtime_const_nomemspace_type,
+            Kokkos::LayoutLeft, dst_execution_space, DstType::Rank, int64_t>(
+            dst, src);
+    } else {
+      if (iterate == Kokkos::Iterate::Right)
+        Kokkos::Impl::ViewCopy<
+            typename DstType::uniform_runtime_nomemspace_type,
+            typename SrcType::uniform_runtime_const_nomemspace_type,
+            Kokkos::LayoutRight, src_execution_space, DstType::Rank, int64_t>(
+            dst, src);
+      else
+        Kokkos::Impl::ViewCopy<
+            typename DstType::uniform_runtime_nomemspace_type,
+            typename SrcType::uniform_runtime_const_nomemspace_type,
+            Kokkos::LayoutLeft, src_execution_space, DstType::Rank, int64_t>(
+            dst, src);
+    }
+  } else {
+    if (DstExecCanAccessSrc) {
+      if (iterate == Kokkos::Iterate::Right)
+        Kokkos::Impl::ViewCopy<
+            typename DstType::uniform_runtime_nomemspace_type,
+            typename SrcType::uniform_runtime_const_nomemspace_type,
+            Kokkos::LayoutRight, dst_execution_space, DstType::Rank, int>(dst,
+                                                                          src);
+      else
+        Kokkos::Impl::ViewCopy<
+            typename DstType::uniform_runtime_nomemspace_type,
+            typename SrcType::uniform_runtime_const_nomemspace_type,
+            Kokkos::LayoutLeft, dst_execution_space, DstType::Rank, int>(dst,
+                                                                         src);
+    } else {
+      if (iterate == Kokkos::Iterate::Right)
+        Kokkos::Impl::ViewCopy<
+            typename DstType::uniform_runtime_nomemspace_type,
+            typename SrcType::uniform_runtime_const_nomemspace_type,
+            Kokkos::LayoutRight, src_execution_space, DstType::Rank, int>(dst,
+                                                                          src);
+      else
+        Kokkos::Impl::ViewCopy<
+            typename DstType::uniform_runtime_nomemspace_type,
+            typename SrcType::uniform_runtime_const_nomemspace_type,
+            Kokkos::LayoutLeft, src_execution_space, DstType::Rank, int>(dst,
+                                                                         src);
+    }
+  }
+}
+
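+// Without an explicit execution space, the copy runs on the destination's
+// execution space when that space can access the source memory, and falls
+// back to the source's execution space otherwise; if neither side can see
+// the other, the error above is raised.
+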
+template <class DstType, class SrcType, int Rank, class... Args>
+struct CommonSubview;
+
+template <class DstType, class SrcType, class Arg0, class... Args>
+struct CommonSubview<DstType, SrcType, 1, Arg0, Args...> {
+  using dst_subview_type = typename Kokkos::Subview<DstType, Arg0>;
+  using src_subview_type = typename Kokkos::Subview<SrcType, Arg0>;
+  dst_subview_type dst_sub;
+  src_subview_type src_sub;
+  CommonSubview(const DstType& dst, const SrcType& src, const Arg0& arg0,
+                Args...)
+      : dst_sub(dst, arg0), src_sub(src, arg0) {}
+};
+
+template <class DstType, class SrcType, class Arg0, class Arg1, class... Args>
+struct CommonSubview<DstType, SrcType, 2, Arg0, Arg1, Args...> {
+  using dst_subview_type = typename Kokkos::Subview<DstType, Arg0, Arg1>;
+  using src_subview_type = typename Kokkos::Subview<SrcType, Arg0, Arg1>;
+  dst_subview_type dst_sub;
+  src_subview_type src_sub;
+  CommonSubview(const DstType& dst, const SrcType& src, const Arg0& arg0,
+                const Arg1& arg1, Args...)
+      : dst_sub(dst, arg0, arg1), src_sub(src, arg0, arg1) {}
+};
+
+template <class DstType, class SrcType, class Arg0, class Arg1, class Arg2,
+          class... Args>
+struct CommonSubview<DstType, SrcType, 3, Arg0, Arg1, Arg2, Args...> {
+  using dst_subview_type = typename Kokkos::Subview<DstType, Arg0, Arg1, Arg2>;
+  using src_subview_type = typename Kokkos::Subview<SrcType, Arg0, Arg1, Arg2>;
+  dst_subview_type dst_sub;
+  src_subview_type src_sub;
+  CommonSubview(const DstType& dst, const SrcType& src, const Arg0& arg0,
+                const Arg1& arg1, const Arg2& arg2, Args...)
+      : dst_sub(dst, arg0, arg1, arg2), src_sub(src, arg0, arg1, arg2) {}
+};
+
+template <class DstType, class SrcType, class Arg0, class Arg1, class Arg2,
+          class Arg3, class... Args>
+struct CommonSubview<DstType, SrcType, 4, Arg0, Arg1, Arg2, Arg3, Args...> {
+  using dst_subview_type =
+      typename Kokkos::Subview<DstType, Arg0, Arg1, Arg2, Arg3>;
+  using src_subview_type =
+      typename Kokkos::Subview<SrcType, Arg0, Arg1, Arg2, Arg3>;
+  dst_subview_type dst_sub;
+  src_subview_type src_sub;
+  CommonSubview(const DstType& dst, const SrcType& src, const Arg0& arg0,
+                const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,
+                const Args...)
+      : dst_sub(dst, arg0, arg1, arg2, arg3),
+        src_sub(src, arg0, arg1, arg2, arg3) {}
+};
+
+template <class DstType, class SrcType, class Arg0, class Arg1, class Arg2,
+          class Arg3, class Arg4, class... Args>
+struct CommonSubview<DstType, SrcType, 5, Arg0, Arg1, Arg2, Arg3, Arg4,
+                     Args...> {
+  using dst_subview_type =
+      typename Kokkos::Subview<DstType, Arg0, Arg1, Arg2, Arg3, Arg4>;
+  using src_subview_type =
+      typename Kokkos::Subview<SrcType, Arg0, Arg1, Arg2, Arg3, Arg4>;
+  dst_subview_type dst_sub;
+  src_subview_type src_sub;
+  CommonSubview(const DstType& dst, const SrcType& src, const Arg0& arg0,
+                const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,
+                const Arg4& arg4, const Args...)
+      : dst_sub(dst, arg0, arg1, arg2, arg3, arg4),
+        src_sub(src, arg0, arg1, arg2, arg3, arg4) {}
+};
+
+template <class DstType, class SrcType, class Arg0, class Arg1, class Arg2,
+          class Arg3, class Arg4, class Arg5, class... Args>
+struct CommonSubview<DstType, SrcType, 6, Arg0, Arg1, Arg2, Arg3, Arg4, Arg5,
+                     Args...> {
+  using dst_subview_type =
+      typename Kokkos::Subview<DstType, Arg0, Arg1, Arg2, Arg3, Arg4, Arg5>;
+  using src_subview_type =
+      typename Kokkos::Subview<SrcType, Arg0, Arg1, Arg2, Arg3, Arg4, Arg5>;
+  dst_subview_type dst_sub;
+  src_subview_type src_sub;
+  CommonSubview(const DstType& dst, const SrcType& src, const Arg0& arg0,
+                const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,
+                const Arg4& arg4, const Arg5& arg5, const Args...)
+      : dst_sub(dst, arg0, arg1, arg2, arg3, arg4, arg5),
+        src_sub(src, arg0, arg1, arg2, arg3, arg4, arg5) {}
+};
+
+template <class DstType, class SrcType, class Arg0, class Arg1, class Arg2,
+          class Arg3, class Arg4, class Arg5, class Arg6, class... Args>
+struct CommonSubview<DstType, SrcType, 7, Arg0, Arg1, Arg2, Arg3, Arg4, Arg5,
+                     Arg6, Args...> {
+  using dst_subview_type = typename Kokkos::Subview<DstType, Arg0, Arg1, Arg2,
+                                                    Arg3, Arg4, Arg5, Arg6>;
+  using src_subview_type = typename Kokkos::Subview<SrcType, Arg0, Arg1, Arg2,
+                                                    Arg3, Arg4, Arg5, Arg6>;
+  dst_subview_type dst_sub;
+  src_subview_type src_sub;
+  CommonSubview(const DstType& dst, const SrcType& src, const Arg0& arg0,
+                const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,
+                const Arg4& arg4, const Arg5& arg5, const Arg6& arg6, Args...)
+      : dst_sub(dst, arg0, arg1, arg2, arg3, arg4, arg5, arg6),
+        src_sub(src, arg0, arg1, arg2, arg3, arg4, arg5, arg6) {}
+};
+
+template <class DstType, class SrcType, class Arg0, class Arg1, class Arg2,
+          class Arg3, class Arg4, class Arg5, class Arg6, class Arg7>
+struct CommonSubview<DstType, SrcType, 8, Arg0, Arg1, Arg2, Arg3, Arg4, Arg5,
+                     Arg6, Arg7> {
+  using dst_subview_type =
+      typename Kokkos::Subview<DstType, Arg0, Arg1, Arg2, Arg3, Arg4, Arg5,
+                               Arg6, Arg7>;
+  using src_subview_type =
+      typename Kokkos::Subview<SrcType, Arg0, Arg1, Arg2, Arg3, Arg4, Arg5,
+                               Arg6, Arg7>;
+  dst_subview_type dst_sub;
+  src_subview_type src_sub;
+  CommonSubview(const DstType& dst, const SrcType& src, const Arg0& arg0,
+                const Arg1& arg1, const Arg2& arg2, const Arg3& arg3,
+                const Arg4& arg4, const Arg5& arg5, const Arg6& arg6,
+                const Arg7& arg7)
+      : dst_sub(dst, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7),
+        src_sub(src, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) {}
+};
+
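+// CommonSubview pairs matching subviews of dst and src over the index
+// ranges they share, so a shape-changing remap reduces to a plain
+// view_copy on congruent subviews. Illustrative sketch (DstType/SrcType
+// stand for concrete rank-2 view types; dst is 10x20, src is 10x15):
+//
+//   using p_type = Kokkos::pair<int64_t, int64_t>;
+//   CommonSubview<DstType, SrcType, 2, Kokkos::Impl::ALL_t, p_type>
+//       common(dst, src, Kokkos::ALL, p_type(0, 15));
+//   view_copy(common.dst_sub, common.src_sub);  // copies the 10x15 block
+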
+template <class DstType, class SrcType,
+          class ExecSpace = typename DstType::execution_space,
+          int Rank        = DstType::Rank>
+struct ViewRemap;
+
+template <class DstType, class SrcType, class ExecSpace>
+struct ViewRemap<DstType, SrcType, ExecSpace, 1> {
+  using p_type = Kokkos::pair<int64_t, int64_t>;
+
+  template <typename... OptExecSpace>
+  ViewRemap(const DstType& dst, const SrcType& src,
+            const OptExecSpace&... exec_space) {
+    static_assert(
+        sizeof...(OptExecSpace) <= 1,
+        "OptExecSpace must be either empty or be an execution space!");
+
+    if (dst.extent(0) == src.extent(0)) {
+      view_copy(exec_space..., dst, src);
+    } else {
+      p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+      using sv_adapter_type = CommonSubview<DstType, SrcType, 1, p_type>;
+      sv_adapter_type common_subview(dst, src, ext0);
+      view_copy(exec_space..., common_subview.dst_sub, common_subview.src_sub);
+    }
+  }
+};
+
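+// ViewRemap copies the overlapping region of two views whose extents may
+// differ; destination entries outside the common extent are left
+// untouched. This is what allows, for instance, Kokkos::resize to preserve
+// existing data across a reallocation (usage sketch under that
+// assumption):
+//
+//   Kokkos::View<double*> v("v", 100);
+//   Kokkos::resize(v, 150);  // the old 100 entries survive the resize
+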
+template <class DstType, class SrcType, class ExecSpace>
+struct ViewRemap<DstType, SrcType, ExecSpace, 2> {
+  using p_type = Kokkos::pair<int64_t, int64_t>;
+
+  template <typename... OptExecSpace>
+  ViewRemap(const DstType& dst, const SrcType& src,
+            const OptExecSpace&... exec_space) {
+    static_assert(
+        sizeof...(OptExecSpace) <= 1,
+        "OptExecSpace must be either empty or be an execution space!");
+
+    if (dst.extent(0) == src.extent(0)) {
+      if (dst.extent(1) == src.extent(1)) {
+        view_copy(exec_space..., dst, src);
+      } else {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 2, Kokkos::Impl::ALL_t, p_type>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    } else {
+      if (dst.extent(1) == src.extent(1)) {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 2, p_type, Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, ext0, Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 2, p_type, p_type>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    }
+  }
+};
+
+template <class DstType, class SrcType, class ExecSpace>
+struct ViewRemap<DstType, SrcType, ExecSpace, 3> {
+  using p_type = Kokkos::pair<int64_t, int64_t>;
+
+  template <typename... OptExecSpace>
+  ViewRemap(const DstType& dst, const SrcType& src,
+            const OptExecSpace&... exec_space) {
+    static_assert(
+        sizeof...(OptExecSpace) <= 1,
+        "OptExecSpace must be either empty or be an execution space!");
+
+    if (dst.extent(0) == src.extent(0)) {
+      if (dst.extent(2) == src.extent(2)) {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 3, Kokkos::Impl::ALL_t, p_type,
+                          Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1,
+                                       Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 3, Kokkos::Impl::ALL_t, p_type,
+                          p_type>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    } else {
+      if (dst.extent(2) == src.extent(2)) {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        using sv_adapter_type = CommonSubview<DstType, SrcType, 3, p_type,
+                                              p_type, Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1, Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 3, p_type, p_type, p_type>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1, ext2);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    }
+  }
+};
+
+template <class DstType, class SrcType, class ExecSpace>
+struct ViewRemap<DstType, SrcType, ExecSpace, 4> {
+  using p_type = Kokkos::pair<int64_t, int64_t>;
+
+  template <typename... OptExecSpace>
+  ViewRemap(const DstType& dst, const SrcType& src,
+            const OptExecSpace&... exec_space) {
+    static_assert(
+        sizeof...(OptExecSpace) <= 1,
+        "OptExecSpace must be either empty or be an execution space!");
+
+    if (dst.extent(0) == src.extent(0)) {
+      if (dst.extent(3) == src.extent(3)) {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 4, Kokkos::Impl::ALL_t, p_type,
+                          p_type, Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2,
+                                       Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 4, Kokkos::Impl::ALL_t, p_type,
+                          p_type, p_type>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    } else {
+      if (dst.extent(3) == src.extent(3)) {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 4, p_type, p_type, p_type,
+                          Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 4, p_type, p_type, p_type, p_type>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    }
+  }
+};
+
+template <class DstType, class SrcType, class ExecSpace>
+struct ViewRemap<DstType, SrcType, ExecSpace, 5> {
+  using p_type = Kokkos::pair<int64_t, int64_t>;
+
+  template <typename... OptExecSpace>
+  ViewRemap(const DstType& dst, const SrcType& src,
+            const OptExecSpace&... exec_space) {
+    static_assert(
+        sizeof...(OptExecSpace) <= 1,
+        "OptExecSpace must be either empty or be an execution space!");
+
+    if (dst.extent(0) == src.extent(0)) {
+      if (dst.extent(4) == src.extent(4)) {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 5, Kokkos::Impl::ALL_t, p_type,
+                          p_type, p_type, Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
+                                       Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 5, Kokkos::Impl::ALL_t, p_type,
+                          p_type, p_type, p_type>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
+                                       ext4);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    } else {
+      if (dst.extent(4) == src.extent(4)) {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 5, p_type, p_type, p_type, p_type,
+                          Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3,
+                                       Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        using sv_adapter_type = CommonSubview<DstType, SrcType, 5, p_type,
+                                              p_type, p_type, p_type, p_type>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3, ext4);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    }
+  }
+};
+
+template <class DstType, class SrcType, class ExecSpace>
+struct ViewRemap<DstType, SrcType, ExecSpace, 6> {
+  using p_type = Kokkos::pair<int64_t, int64_t>;
+
+  template <typename... OptExecSpace>
+  ViewRemap(const DstType& dst, const SrcType& src,
+            const OptExecSpace&... exec_space) {
+    static_assert(
+        sizeof...(OptExecSpace) <= 1,
+        "OptExecSpace must be either empty or be an execution space!");
+
+    if (dst.extent(0) == src.extent(0)) {
+      if (dst.extent(5) == src.extent(5)) {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 6, Kokkos::Impl::ALL_t, p_type,
+                          p_type, p_type, p_type, Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
+                                       ext4, Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 6, Kokkos::Impl::ALL_t, p_type,
+                          p_type, p_type, p_type, p_type>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
+                                       ext4, ext5);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    } else {
+      if (dst.extent(5) == src.extent(5)) {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 6, p_type, p_type, p_type, p_type,
+                          p_type, Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3, ext4,
+                                       Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
+
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 6, p_type, p_type, p_type, p_type,
+                          p_type, p_type>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3, ext4,
+                                       ext5);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    }
+  }
+};
+
+template <class DstType, class SrcType, class ExecSpace>
+struct ViewRemap<DstType, SrcType, ExecSpace, 7> {
+  using p_type = Kokkos::pair<int64_t, int64_t>;
+
+  template <typename... OptExecSpace>
+  ViewRemap(const DstType& dst, const SrcType& src,
+            const OptExecSpace&... exec_space) {
+    static_assert(
+        sizeof...(OptExecSpace) <= 1,
+        "OptExecSpace must be either empty or be an execution space!");
+
+    if (dst.extent(0) == src.extent(0)) {
+      if (dst.extent(6) == src.extent(6)) {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 7, Kokkos::Impl::ALL_t, p_type,
+                          p_type, p_type, p_type, p_type, Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
+                                       ext4, ext5, Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
+        p_type ext6(0, std::min(dst.extent(6), src.extent(6)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 7, Kokkos::Impl::ALL_t, p_type,
+                          p_type, p_type, p_type, p_type, p_type>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
+                                       ext4, ext5, ext6);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    } else {
+      if (dst.extent(6) == src.extent(6)) {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 7, p_type, p_type, p_type, p_type,
+                          p_type, p_type, Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3, ext4,
+                                       ext5, Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
+        p_type ext6(0, std::min(dst.extent(6), src.extent(6)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 7, p_type, p_type, p_type, p_type,
+                          p_type, p_type, p_type>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3, ext4,
+                                       ext5, ext6);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    }
+  }
+};
+
+template <class DstType, class SrcType, class ExecSpace>
+struct ViewRemap<DstType, SrcType, ExecSpace, 8> {
+  using p_type = Kokkos::pair<int64_t, int64_t>;
+
+  template <typename... OptExecSpace>
+  ViewRemap(const DstType& dst, const SrcType& src,
+            const OptExecSpace&... exec_space) {
+    static_assert(
+        sizeof...(OptExecSpace) <= 1,
+        "OptExecSpace must be either empty or be an execution space!");
+
+    if (dst.extent(0) == src.extent(0)) {
+      if (dst.extent(7) == src.extent(7)) {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
+        p_type ext6(0, std::min(dst.extent(6), src.extent(6)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 8, Kokkos::Impl::ALL_t, p_type,
+                          p_type, p_type, p_type, p_type, p_type,
+                          Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
+                                       ext4, ext5, ext6, Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
+        p_type ext6(0, std::min(dst.extent(6), src.extent(6)));
+        p_type ext7(0, std::min(dst.extent(7), src.extent(7)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 8, Kokkos::Impl::ALL_t, p_type,
+                          p_type, p_type, p_type, p_type, p_type, p_type>;
+        sv_adapter_type common_subview(dst, src, Kokkos::ALL, ext1, ext2, ext3,
+                                       ext4, ext5, ext6, ext7);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    } else {
+      if (dst.extent(7) == src.extent(7)) {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
+        p_type ext6(0, std::min(dst.extent(6), src.extent(6)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 8, p_type, p_type, p_type, p_type,
+                          p_type, p_type, p_type, Kokkos::Impl::ALL_t>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3, ext4,
+                                       ext5, ext6, Kokkos::ALL);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      } else {
+        p_type ext0(0, std::min(dst.extent(0), src.extent(0)));
+        p_type ext1(0, std::min(dst.extent(1), src.extent(1)));
+        p_type ext2(0, std::min(dst.extent(2), src.extent(2)));
+        p_type ext3(0, std::min(dst.extent(3), src.extent(3)));
+        p_type ext4(0, std::min(dst.extent(4), src.extent(4)));
+        p_type ext5(0, std::min(dst.extent(5), src.extent(5)));
+        p_type ext6(0, std::min(dst.extent(6), src.extent(6)));
+        p_type ext7(0, std::min(dst.extent(7), src.extent(7)));
+        using sv_adapter_type =
+            CommonSubview<DstType, SrcType, 8, p_type, p_type, p_type, p_type,
+                          p_type, p_type, p_type, p_type>;
+        sv_adapter_type common_subview(dst, src, ext0, ext1, ext2, ext3, ext4,
+                                       ext5, ext6, ext7);
+        view_copy(exec_space..., common_subview.dst_sub,
+                  common_subview.src_sub);
+      }
+    }
+  }
+};
+
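+// For ranks above 2, each ViewRemap specialization tests only the first
+// and last dimensions for an exact extent match (addressing them with
+// Kokkos::ALL when they agree); the interior dimensions are always clamped
+// to the shared extent via pair ranges.
+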
+template <typename ExecutionSpace, class DT, class... DP>
+inline void contiguous_fill(
+    const ExecutionSpace& exec_space, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value) {
+  using ViewType     = View<DT, DP...>;
+  using ViewTypeFlat = Kokkos::View<
+      typename ViewType::value_type*, Kokkos::LayoutRight,
+      Kokkos::Device<typename ViewType::execution_space,
+                     std::conditional_t<ViewType::Rank == 0,
+                                        typename ViewType::memory_space,
+                                        Kokkos::AnonymousSpace>>,
+      Kokkos::MemoryTraits<0>>;
+
+  ViewTypeFlat dst_flat(dst.data(), dst.size());
+  if (dst.span() < static_cast<size_t>(std::numeric_limits<int>::max())) {
+    Kokkos::Impl::ViewFill<ViewTypeFlat, Kokkos::LayoutRight, ExecutionSpace,
+                           ViewTypeFlat::Rank, int>(dst_flat, value,
+                                                    exec_space);
+  } else
+    Kokkos::Impl::ViewFill<ViewTypeFlat, Kokkos::LayoutRight, ExecutionSpace,
+                           ViewTypeFlat::Rank, int64_t>(dst_flat, value,
+                                                        exec_space);
+}
+
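+// contiguous_fill reinterprets any contiguous view as a flat rank-1
+// LayoutRight view over the same allocation, so a single 1D ViewFill
+// covers every rank. Roughly equivalent hand-written form (illustrative
+// sketch only; assumes a double-valued view):
+//
+//   double* p      = dst.data();
+//   const double v = value;
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<ExecutionSpace>(exec_space, 0, dst.size()),
+//       KOKKOS_LAMBDA(const int64_t i) { p[i] = v; });
+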
+template <typename ExecutionSpace, class DT, class... DP>
+struct ZeroMemset {
+  ZeroMemset(const ExecutionSpace& exec_space, const View<DT, DP...>& dst,
+             typename ViewTraits<DT, DP...>::const_value_type& value) {
+    contiguous_fill(exec_space, dst, value);
+  }
+
+  ZeroMemset(const View<DT, DP...>& dst,
+             typename ViewTraits<DT, DP...>::const_value_type& value) {
+    contiguous_fill(ExecutionSpace(), dst, value);
+  }
+};
+
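+// This generic ZeroMemset is only a fallback that forwards to
+// contiguous_fill; individual backends are expected to specialize it so
+// that filling with an all-zero byte pattern can use an actual (possibly
+// asynchronous) memset instead of a parallel loop.
+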
+template <typename ExecutionSpace, class DT, class... DP>
+inline std::enable_if_t<
+    std::is_trivial<typename ViewTraits<DT, DP...>::value_type>::value &&
+    std::is_trivially_copy_assignable<
+        typename ViewTraits<DT, DP...>::value_type>::value>
+contiguous_fill_or_memset(
+    const ExecutionSpace& exec_space, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value) {
+// On A64FX, memset seems to do the wrong thing with regard to first touch,
+// leading to significant performance issues
+#ifndef KOKKOS_ARCH_A64FX
+  if (Impl::is_zero_byte(value))
+    ZeroMemset<ExecutionSpace, DT, DP...>(exec_space, dst, value);
+  else
+#endif
+    contiguous_fill(exec_space, dst, value);
+}
+
+template <typename ExecutionSpace, class DT, class... DP>
+inline std::enable_if_t<
+    !(std::is_trivial<typename ViewTraits<DT, DP...>::value_type>::value &&
+      std::is_trivially_copy_assignable<
+          typename ViewTraits<DT, DP...>::value_type>::value)>
+contiguous_fill_or_memset(
+    const ExecutionSpace& exec_space, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value) {
+  contiguous_fill(exec_space, dst, value);
+}
+
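+// The enable_if pair above routes trivially copyable value types through
+// the potential memset path and everything else straight to
+// contiguous_fill: a byte-wise fill is only valid for types whose value is
+// fully determined by their object representation.
+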
+template <class DT, class... DP>
+inline std::enable_if_t<
+    std::is_trivial<typename ViewTraits<DT, DP...>::value_type>::value &&
+    std::is_trivially_copy_assignable<
+        typename ViewTraits<DT, DP...>::value_type>::value>
+contiguous_fill_or_memset(
+    const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value) {
+  using ViewType        = View<DT, DP...>;
+  using exec_space_type = typename ViewType::execution_space;
+
+// On A64FX, memset seems to do the wrong thing with regard to first touch,
+// leading to significant performance issues
+#ifndef KOKKOS_ARCH_A64FX
+  if (Impl::is_zero_byte(value))
+    ZeroMemset<exec_space_type, DT, DP...>(dst, value);
+  else
+#endif
+    contiguous_fill(exec_space_type(), dst, value);
+}
+
+template <class DT, class... DP>
+inline std::enable_if_t<
+    !(std::is_trivial<typename ViewTraits<DT, DP...>::value_type>::value &&
+      std::is_trivially_copy_assignable<
+          typename ViewTraits<DT, DP...>::value_type>::value)>
+contiguous_fill_or_memset(
+    const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value) {
+  using ViewType        = View<DT, DP...>;
+  using exec_space_type = typename ViewType::execution_space;
+
+  contiguous_fill(exec_space_type(), dst, value);
+}
+}  // namespace Impl
+
+/** \brief  Deep copy a value from Host memory into a view.  */
+template <class DT, class... DP>
+inline void deep_copy(
+    const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
+                                  void>::value>* = nullptr) {
+  using ViewType        = View<DT, DP...>;
+  using exec_space_type = typename ViewType::execution_space;
+
+  if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
+    Kokkos::Profiling::beginDeepCopy(
+        Kokkos::Profiling::make_space_handle(ViewType::memory_space::name()),
+        dst.label(), dst.data(),
+        Kokkos::Profiling::make_space_handle(Kokkos::HostSpace::name()),
+        "Scalar", &value, dst.span() * sizeof(typename ViewType::value_type));
+  }
+
+  if (dst.data() == nullptr) {
+    Kokkos::fence(
+        "Kokkos::deep_copy: scalar copy, fence because destination is null");
+    if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+      Kokkos::Profiling::endDeepCopy();
+    }
+    return;
+  }
+
+  Kokkos::fence("Kokkos::deep_copy: scalar copy, pre copy fence");
+  static_assert(std::is_same<typename ViewType::non_const_value_type,
+                             typename ViewType::value_type>::value,
+                "deep_copy requires non-const type");
+
+  // If contiguous we can simply do a 1D flat loop or use memset
+  if (dst.span_is_contiguous()) {
+    Impl::contiguous_fill_or_memset(dst, value);
+    Kokkos::fence("Kokkos::deep_copy: scalar copy, post copy fence");
+    if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+      Kokkos::Profiling::endDeepCopy();
+    }
+    return;
+  }
+
+  // Figure out iteration order to do the ViewFill
+  int64_t strides[ViewType::Rank + 1];
+  dst.stride(strides);
+  Kokkos::Iterate iterate;
+  if (std::is_same<typename ViewType::array_layout,
+                   Kokkos::LayoutRight>::value) {
+    iterate = Kokkos::Iterate::Right;
+  } else if (std::is_same<typename ViewType::array_layout,
+                          Kokkos::LayoutLeft>::value) {
+    iterate = Kokkos::Iterate::Left;
+  } else if (std::is_same<typename ViewType::array_layout,
+                          Kokkos::LayoutStride>::value) {
+    if (strides[0] > strides[ViewType::Rank > 0 ? ViewType::Rank - 1 : 0])
+      iterate = Kokkos::Iterate::Right;
+    else
+      iterate = Kokkos::Iterate::Left;
+  } else {
+    if (std::is_same<typename ViewType::execution_space::array_layout,
+                     Kokkos::LayoutRight>::value)
+      iterate = Kokkos::Iterate::Right;
+    else
+      iterate = Kokkos::Iterate::Left;
+  }
+
+  // Lets call the right ViewFill functor based on integer space needed and
+  // iteration type
+  using ViewTypeUniform =
+      std::conditional_t<ViewType::Rank == 0,
+                         typename ViewType::uniform_runtime_type,
+                         typename ViewType::uniform_runtime_nomemspace_type>;
+  if (dst.span() > static_cast<size_t>(std::numeric_limits<int>::max())) {
+    if (iterate == Kokkos::Iterate::Right)
+      Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutRight,
+                             exec_space_type, ViewType::Rank, int64_t>(
+          dst, value, exec_space_type());
+    else
+      Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutLeft,
+                             exec_space_type, ViewType::Rank, int64_t>(
+          dst, value, exec_space_type());
+  } else {
+    if (iterate == Kokkos::Iterate::Right)
+      Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutRight,
+                             exec_space_type, ViewType::Rank, int>(
+          dst, value, exec_space_type());
+    else
+      Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutLeft,
+                             exec_space_type, ViewType::Rank, int>(
+          dst, value, exec_space_type());
+  }
+  Kokkos::fence("Kokkos::deep_copy: scalar copy, post copy fence");
+
+  if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+    Kokkos::Profiling::endDeepCopy();
+  }
+}
+
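+// Usage sketch for the scalar-fill overload above (hypothetical views):
+//
+//   Kokkos::View<double*> c("c", 1000);
+//   Kokkos::deep_copy(c, 0.0);  // contiguous: contiguous_fill_or_memset
+//
+//   Kokkos::View<double**, Kokkos::LayoutRight> m("m", 100, 100);
+//   auto col = Kokkos::subview(m, Kokkos::ALL, 0);  // strided column
+//   Kokkos::deep_copy(col, 1.0);  // non-contiguous: dispatches to ViewFill
+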
+/** \brief  Deep copy into a value in Host memory from a view.  */
+template <class ST, class... SP>
+inline void deep_copy(
+    typename ViewTraits<ST, SP...>::non_const_value_type& dst,
+    const View<ST, SP...>& src,
+    std::enable_if_t<std::is_same<typename ViewTraits<ST, SP...>::specialize,
+                                  void>::value>* = nullptr) {
+  using src_traits       = ViewTraits<ST, SP...>;
+  using src_memory_space = typename src_traits::memory_space;
+
+  static_assert(src_traits::rank == 0,
+                "ERROR: Non-rank-zero view in deep_copy( value , View )");
+
+  if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
+    Kokkos::Profiling::beginDeepCopy(
+        Kokkos::Profiling::make_space_handle(Kokkos::HostSpace::name()),
+        "Scalar", &dst,
+        Kokkos::Profiling::make_space_handle(src_memory_space::name()),
+        src.label(), src.data(),
+        src.span() * sizeof(typename src_traits::value_type));
+  }
+
+  if (src.data() == nullptr) {
+    Kokkos::fence("Kokkos::deep_copy: copy into scalar, src is null");
+  } else {
+    Kokkos::fence("Kokkos::deep_copy: copy into scalar, pre copy fence");
+    Kokkos::Impl::DeepCopy<HostSpace, src_memory_space>(&dst, src.data(),
+                                                        sizeof(ST));
+    Kokkos::fence("Kokkos::deep_copy: copy into scalar, post copy fence");
+  }
+
+  if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+    Kokkos::Profiling::endDeepCopy();
+  }
+}
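+
+// Usage sketch (hypothetical names; assumes an initialized runtime): reading
+// a rank-0 view back into a host scalar.
+//
+//   Kokkos::View<double> r("result");  // rank-0 view, possibly on device
+//   double value = 0;
+//   Kokkos::deep_copy(value, r);       // fenced copy of r() into value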
+
+//----------------------------------------------------------------------------
+/** \brief  A deep copy between views of compatible type, and rank zero.  */
+template <class DT, class... DP, class ST, class... SP>
+inline void deep_copy(
+    const View<DT, DP...>& dst, const View<ST, SP...>& src,
+    std::enable_if_t<
+        (std::is_void<typename ViewTraits<DT, DP...>::specialize>::value &&
+         std::is_void<typename ViewTraits<ST, SP...>::specialize>::value &&
+         (unsigned(ViewTraits<DT, DP...>::rank) == unsigned(0) &&
+          unsigned(ViewTraits<ST, SP...>::rank) == unsigned(0)))>* = nullptr) {
+  using dst_type = View<DT, DP...>;
+  using src_type = View<ST, SP...>;
+
+  using value_type       = typename dst_type::value_type;
+  using dst_memory_space = typename dst_type::memory_space;
+  using src_memory_space = typename src_type::memory_space;
+
+  static_assert(std::is_same<typename dst_type::value_type,
+                             typename src_type::non_const_value_type>::value,
+                "deep_copy requires matching non-const destination type");
+
+  if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
+    Kokkos::Profiling::beginDeepCopy(
+        Kokkos::Profiling::make_space_handle(dst_memory_space::name()),
+        dst.label(), dst.data(),
+        Kokkos::Profiling::make_space_handle(src_memory_space::name()),
+        src.label(), src.data(),
+        src.span() * sizeof(typename dst_type::value_type));
+  }
+
+  if (dst.data() == nullptr && src.data() == nullptr) {
+    Kokkos::fence(
+        "Kokkos::deep_copy: scalar to scalar copy, both pointers null");
+    if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+      Kokkos::Profiling::endDeepCopy();
+    }
+    return;
+  }
+
+  Kokkos::fence("Kokkos::deep_copy: scalar to scalar copy, pre copy fence");
+  if (dst.data() != src.data()) {
+    Kokkos::Impl::DeepCopy<dst_memory_space, src_memory_space>(
+        dst.data(), src.data(), sizeof(value_type));
+    Kokkos::fence("Kokkos::deep_copy: scalar to scalar copy, post copy fence");
+  }
+  if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+    Kokkos::Profiling::endDeepCopy();
+  }
+}
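+
+// Usage sketch (hypothetical names): moving a single value between memory
+// spaces via two rank-0 views, e.g. a reduction result.
+//
+//   Kokkos::View<int> d("d");                     // default memory space
+//   Kokkos::View<int, Kokkos::HostSpace> h("h");  // host memory
+//   Kokkos::deep_copy(d, h);                      // sizeof(int) bytes, fenced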
+
+//----------------------------------------------------------------------------
+/** \brief  A deep copy between views of the default specialization, compatible
+ * type, same non-zero rank, same contiguous layout.
+ */
+template <class DT, class... DP, class ST, class... SP>
+inline void deep_copy(
+    const View<DT, DP...>& dst, const View<ST, SP...>& src,
+    std::enable_if_t<
+        (std::is_void<typename ViewTraits<DT, DP...>::specialize>::value &&
+         std::is_void<typename ViewTraits<ST, SP...>::specialize>::value &&
+         (unsigned(ViewTraits<DT, DP...>::rank) != 0 ||
+          unsigned(ViewTraits<ST, SP...>::rank) != 0))>* = nullptr) {
+  using dst_type            = View<DT, DP...>;
+  using src_type            = View<ST, SP...>;
+  using dst_execution_space = typename dst_type::execution_space;
+  using src_execution_space = typename src_type::execution_space;
+  using dst_memory_space    = typename dst_type::memory_space;
+  using src_memory_space    = typename src_type::memory_space;
+  using dst_value_type      = typename dst_type::value_type;
+  using src_value_type      = typename src_type::value_type;
+
+  static_assert(std::is_same<typename dst_type::value_type,
+                             typename dst_type::non_const_value_type>::value,
+                "deep_copy requires non-const destination type");
+
+  static_assert((unsigned(dst_type::rank) == unsigned(src_type::rank)),
+                "deep_copy requires Views of equal rank");
+
+  if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
+    Kokkos::Profiling::beginDeepCopy(
+        Kokkos::Profiling::make_space_handle(dst_memory_space::name()),
+        dst.label(), dst.data(),
+        Kokkos::Profiling::make_space_handle(src_memory_space::name()),
+        src.label(), src.data(),
+        src.span() * sizeof(typename dst_type::value_type));
+  }
+
+  if (dst.data() == nullptr || src.data() == nullptr) {
+    // throw if dimension mismatch
+    if ((src.extent(0) != dst.extent(0)) || (src.extent(1) != dst.extent(1)) ||
+        (src.extent(2) != dst.extent(2)) || (src.extent(3) != dst.extent(3)) ||
+        (src.extent(4) != dst.extent(4)) || (src.extent(5) != dst.extent(5)) ||
+        (src.extent(6) != dst.extent(6)) || (src.extent(7) != dst.extent(7))) {
+      std::string message(
+          "Deprecation Error: Kokkos::deep_copy extents of views don't "
+          "match: ");
+      message += dst.label();
+      message += "(";
+      for (int r = 0; r < dst_type::Rank - 1; r++) {
+        message += std::to_string(dst.extent(r));
+        message += ",";
+      }
+      message += std::to_string(dst.extent(dst_type::Rank - 1));
+      message += ") ";
+      message += src.label();
+      message += "(";
+      for (int r = 0; r < src_type::Rank - 1; r++) {
+        message += std::to_string(src.extent(r));
+        message += ",";
+      }
+      message += std::to_string(src.extent(src_type::Rank - 1));
+      message += ") ";
+
+      Kokkos::Impl::throw_runtime_exception(message);
+    }
+    Kokkos::fence(
+        "Kokkos::deep_copy: copy between contiguous views, fence due to null "
+        "argument");
+    if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+      Kokkos::Profiling::endDeepCopy();
+    }
+    return;
+  }
+
+  enum {
+    DstExecCanAccessSrc =
+        Kokkos::SpaceAccessibility<dst_execution_space,
+                                   src_memory_space>::accessible
+  };
+
+  enum {
+    SrcExecCanAccessDst =
+        Kokkos::SpaceAccessibility<src_execution_space,
+                                   dst_memory_space>::accessible
+  };
+
+  // Check for overlapping views.
+  dst_value_type* dst_start = dst.data();
+  dst_value_type* dst_end   = dst.data() + dst.span();
+  src_value_type* src_start = src.data();
+  src_value_type* src_end   = src.data() + src.span();
+  if (((std::ptrdiff_t)dst_start == (std::ptrdiff_t)src_start) &&
+      ((std::ptrdiff_t)dst_end == (std::ptrdiff_t)src_end) &&
+      (dst.span_is_contiguous() && src.span_is_contiguous())) {
+    Kokkos::fence(
+        "Kokkos::deep_copy: copy between contiguous views, fence due to same "
+        "spans");
+    if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+      Kokkos::Profiling::endDeepCopy();
+    }
+    return;
+  }
+
+  if ((((std::ptrdiff_t)dst_start < (std::ptrdiff_t)src_end) &&
+       ((std::ptrdiff_t)dst_end > (std::ptrdiff_t)src_start)) &&
+      ((dst.span_is_contiguous() && src.span_is_contiguous()))) {
+    std::string message("Error: Kokkos::deep_copy of overlapping views: ");
+    message += dst.label();
+    message += "(";
+    message += std::to_string((std::ptrdiff_t)dst_start);
+    message += ",";
+    message += std::to_string((std::ptrdiff_t)dst_end);
+    message += ") ";
+    message += src.label();
+    message += "(";
+    message += std::to_string((std::ptrdiff_t)src_start);
+    message += ",";
+    message += std::to_string((std::ptrdiff_t)src_end);
+    message += ") ";
+    Kokkos::Impl::throw_runtime_exception(message);
+  }
+
+  // Check for same extents
+  if ((src.extent(0) != dst.extent(0)) || (src.extent(1) != dst.extent(1)) ||
+      (src.extent(2) != dst.extent(2)) || (src.extent(3) != dst.extent(3)) ||
+      (src.extent(4) != dst.extent(4)) || (src.extent(5) != dst.extent(5)) ||
+      (src.extent(6) != dst.extent(6)) || (src.extent(7) != dst.extent(7))) {
+    std::string message(
+        "Deprecation Error: Kokkos::deep_copy extents of views don't match: ");
+    message += dst.label();
+    message += "(";
+    for (int r = 0; r < dst_type::Rank - 1; r++) {
+      message += std::to_string(dst.extent(r));
+      message += ",";
+    }
+    message += std::to_string(dst.extent(dst_type::Rank - 1));
+    message += ") ";
+    message += src.label();
+    message += "(";
+    for (int r = 0; r < src_type::Rank - 1; r++) {
+      message += std::to_string(src.extent(r));
+      message += ",";
+    }
+    message += std::to_string(src.extent(src_type::Rank - 1));
+    message += ") ";
+
+    Kokkos::Impl::throw_runtime_exception(message);
+  }
+
+  // If same type, equal layout, equal dimensions, equal span, and contiguous
+  // memory, then we can do a byte-wise copy
+
+  if (std::is_same<typename dst_type::value_type,
+                   typename src_type::non_const_value_type>::value &&
+      (std::is_same<typename dst_type::array_layout,
+                    typename src_type::array_layout>::value ||
+       (dst_type::rank == 1 && src_type::rank == 1)) &&
+      dst.span_is_contiguous() && src.span_is_contiguous() &&
+      ((dst_type::rank < 1) || (dst.stride_0() == src.stride_0())) &&
+      ((dst_type::rank < 2) || (dst.stride_1() == src.stride_1())) &&
+      ((dst_type::rank < 3) || (dst.stride_2() == src.stride_2())) &&
+      ((dst_type::rank < 4) || (dst.stride_3() == src.stride_3())) &&
+      ((dst_type::rank < 5) || (dst.stride_4() == src.stride_4())) &&
+      ((dst_type::rank < 6) || (dst.stride_5() == src.stride_5())) &&
+      ((dst_type::rank < 7) || (dst.stride_6() == src.stride_6())) &&
+      ((dst_type::rank < 8) || (dst.stride_7() == src.stride_7()))) {
+    const size_t nbytes = sizeof(typename dst_type::value_type) * dst.span();
+    Kokkos::fence(
+        "Kokkos::deep_copy: copy between contiguous views, pre view equality "
+        "check");
+    if ((void*)dst.data() != (void*)src.data()) {
+      Kokkos::Impl::DeepCopy<dst_memory_space, src_memory_space>(
+          dst.data(), src.data(), nbytes);
+      Kokkos::fence(
+          "Kokkos::deep_copy: copy between contiguous views, post deep copy "
+          "fence");
+    }
+  } else {
+    Kokkos::fence(
+        "Kokkos::deep_copy: copy between contiguous views, pre copy fence");
+    Impl::view_copy(dst, src);
+    Kokkos::fence(
+        "Kokkos::deep_copy: copy between contiguous views, post copy fence");
+  }
+  if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+    Kokkos::Profiling::endDeepCopy();
+  }
+}
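+
+// Usage sketch (hypothetical names): the common mirror pattern, which takes
+// the contiguous byte-wise fast path above when layouts and strides match.
+//
+//   Kokkos::View<double**> d("d", 50, 50);
+//   auto h = Kokkos::create_mirror_view(d);  // same extents and layout
+//   Kokkos::deep_copy(d, h);                 // single byte-wise copy, fenced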
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+namespace Experimental {
+/** \brief  A local deep copy between views of the default specialization,
+ * compatible type, same non-zero rank.
+ */
+template <class TeamType, class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION
+local_deep_copy_contiguous(const TeamType& team, const View<DT, DP...>& dst,
+                           const View<ST, SP...>& src) {
+  Kokkos::parallel_for(Kokkos::TeamVectorRange(team, src.span()),
+                       [&](const int& i) { dst.data()[i] = src.data()[i]; });
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy_contiguous(
+    const View<DT, DP...>& dst, const View<ST, SP...>& src) {
+  for (size_t i = 0; i < src.span(); ++i) {
+    dst.data()[i] = src.data()[i];
+  }
+}
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 1 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 1)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0);
+
+  team.team_barrier();
+  Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N),
+                       [&](const int& i) { dst(i) = src(i); });
+  team.team_barrier();
+}
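+
+// Usage sketch (hypothetical rank-2 views `dst2d`/`src2d`): one row per team,
+// copied cooperatively by the team's threads and vector lanes.
+//
+//   Kokkos::parallel_for(
+//       Kokkos::TeamPolicy<>(dst2d.extent(0), Kokkos::AUTO),
+//       KOKKOS_LAMBDA(const Kokkos::TeamPolicy<>::member_type& team) {
+//         auto d = Kokkos::subview(dst2d, team.league_rank(), Kokkos::ALL);
+//         auto s = Kokkos::subview(src2d, team.league_rank(), Kokkos::ALL);
+//         Kokkos::Experimental::local_deep_copy(team, d, s);
+//       });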
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 2 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 2)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0) * dst.extent(1);
+
+  if (dst.span_is_contiguous() && src.span_is_contiguous()) {
+    team.team_barrier();
+    local_deep_copy_contiguous(team, dst, src);
+    team.team_barrier();
+  } else {
+    team.team_barrier();
+    Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N), [&](const int& i) {
+      int i0      = i % dst.extent(0);
+      int i1      = i / dst.extent(0);
+      dst(i0, i1) = src(i0, i1);
+    });
+    team.team_barrier();
+  }
+}
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 3 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 3)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0) * dst.extent(1) * dst.extent(2);
+
+  if (dst.span_is_contiguous() && src.span_is_contiguous()) {
+    team.team_barrier();
+    local_deep_copy_contiguous(team, dst, src);
+    team.team_barrier();
+  } else {
+    team.team_barrier();
+    Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N), [&](const int& i) {
+      int i0          = i % dst.extent(0);
+      int itmp        = i / dst.extent(0);
+      int i1          = itmp % dst.extent(1);
+      int i2          = itmp / dst.extent(1);
+      dst(i0, i1, i2) = src(i0, i1, i2);
+    });
+    team.team_barrier();
+  }
+}
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 4 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 4)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N =
+      dst.extent(0) * dst.extent(1) * dst.extent(2) * dst.extent(3);
+
+  if (dst.span_is_contiguous() && src.span_is_contiguous()) {
+    team.team_barrier();
+    local_deep_copy_contiguous(team, dst, src);
+    team.team_barrier();
+  } else {
+    team.team_barrier();
+    Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N), [&](const int& i) {
+      int i0              = i % dst.extent(0);
+      int itmp            = i / dst.extent(0);
+      int i1              = itmp % dst.extent(1);
+      itmp                = itmp / dst.extent(1);
+      int i2              = itmp % dst.extent(2);
+      int i3              = itmp / dst.extent(2);
+      dst(i0, i1, i2, i3) = src(i0, i1, i2, i3);
+    });
+    team.team_barrier();
+  }
+}
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 5 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 5)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0) * dst.extent(1) * dst.extent(2) *
+                   dst.extent(3) * dst.extent(4);
+
+  if (dst.span_is_contiguous() && src.span_is_contiguous()) {
+    team.team_barrier();
+    local_deep_copy_contiguous(team, dst, src);
+    team.team_barrier();
+  } else {
+    team.team_barrier();
+    Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N), [&](const int& i) {
+      int i0                  = i % dst.extent(0);
+      int itmp                = i / dst.extent(0);
+      int i1                  = itmp % dst.extent(1);
+      itmp                    = itmp / dst.extent(1);
+      int i2                  = itmp % dst.extent(2);
+      itmp                    = itmp / dst.extent(2);
+      int i3                  = itmp % dst.extent(3);
+      int i4                  = itmp / dst.extent(3);
+      dst(i0, i1, i2, i3, i4) = src(i0, i1, i2, i3, i4);
+    });
+    team.team_barrier();
+  }
+}
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 6 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 6)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0) * dst.extent(1) * dst.extent(2) *
+                   dst.extent(3) * dst.extent(4) * dst.extent(5);
+
+  if (dst.span_is_contiguous() && src.span_is_contiguous()) {
+    team.team_barrier();
+    local_deep_copy_contiguous(team, dst, src);
+    team.team_barrier();
+  } else {
+    team.team_barrier();
+    Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N), [&](const int& i) {
+      int i0                      = i % dst.extent(0);
+      int itmp                    = i / dst.extent(0);
+      int i1                      = itmp % dst.extent(1);
+      itmp                        = itmp / dst.extent(1);
+      int i2                      = itmp % dst.extent(2);
+      itmp                        = itmp / dst.extent(2);
+      int i3                      = itmp % dst.extent(3);
+      itmp                        = itmp / dst.extent(3);
+      int i4                      = itmp % dst.extent(4);
+      int i5                      = itmp / dst.extent(4);
+      dst(i0, i1, i2, i3, i4, i5) = src(i0, i1, i2, i3, i4, i5);
+    });
+    team.team_barrier();
+  }
+}
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 7 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 7)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0) * dst.extent(1) * dst.extent(2) *
+                   dst.extent(3) * dst.extent(4) * dst.extent(5) *
+                   dst.extent(6);
+
+  if (dst.span_is_contiguous() && src.span_is_contiguous()) {
+    team.team_barrier();
+    local_deep_copy_contiguous(team, dst, src);
+    team.team_barrier();
+  } else {
+    team.team_barrier();
+    Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N), [&](const int& i) {
+      int i0                          = i % dst.extent(0);
+      int itmp                        = i / dst.extent(0);
+      int i1                          = itmp % dst.extent(1);
+      itmp                            = itmp / dst.extent(1);
+      int i2                          = itmp % dst.extent(2);
+      itmp                            = itmp / dst.extent(2);
+      int i3                          = itmp % dst.extent(3);
+      itmp                            = itmp / dst.extent(3);
+      int i4                          = itmp % dst.extent(4);
+      itmp                            = itmp / dst.extent(4);
+      int i5                          = itmp % dst.extent(5);
+      int i6                          = itmp / dst.extent(5);
+      dst(i0, i1, i2, i3, i4, i5, i6) = src(i0, i1, i2, i3, i4, i5, i6);
+    });
+    team.team_barrier();
+  }
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst, const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 1 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 1)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0);
+
+  for (size_t i = 0; i < N; ++i) {
+    dst(i) = src(i);
+  }
+}
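+
+// Usage sketch (hypothetical names): the overloads without a team handle run
+// sequentially in the calling thread, e.g. one row per RangePolicy index.
+//
+//   Kokkos::parallel_for(
+//       dst2d.extent(0), KOKKOS_LAMBDA(const int i) {
+//         auto d = Kokkos::subview(dst2d, i, Kokkos::ALL);
+//         auto s = Kokkos::subview(src2d, i, Kokkos::ALL);
+//         Kokkos::Experimental::local_deep_copy(d, s);
+//       });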
+//----------------------------------------------------------------------------
+template <class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst, const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 2 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 2)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  if (dst.span_is_contiguous() && src.span_is_contiguous()) {
+    local_deep_copy_contiguous(dst, src);
+  } else {
+    for (size_t i0 = 0; i0 < dst.extent(0); ++i0)
+      for (size_t i1 = 0; i1 < dst.extent(1); ++i1) dst(i0, i1) = src(i0, i1);
+  }
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst, const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 3 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 3)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  if (dst.span_is_contiguous() && src.span_is_contiguous()) {
+    local_deep_copy_contiguous(dst, src);
+  } else {
+    for (size_t i0 = 0; i0 < dst.extent(0); ++i0)
+      for (size_t i1 = 0; i1 < dst.extent(1); ++i1)
+        for (size_t i2 = 0; i2 < dst.extent(2); ++i2)
+          dst(i0, i1, i2) = src(i0, i1, i2);
+  }
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst, const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 4 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 4)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  if (dst.span_is_contiguous() && src.span_is_contiguous()) {
+    local_deep_copy_contiguous(dst, src);
+  } else {
+    for (size_t i0 = 0; i0 < dst.extent(0); ++i0)
+      for (size_t i1 = 0; i1 < dst.extent(1); ++i1)
+        for (size_t i2 = 0; i2 < dst.extent(2); ++i2)
+          for (size_t i3 = 0; i3 < dst.extent(3); ++i3)
+            dst(i0, i1, i2, i3) = src(i0, i1, i2, i3);
+  }
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst, const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 5 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 5)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  if (dst.span_is_contiguous() && src.span_is_contiguous()) {
+    local_deep_copy_contiguous(dst, src);
+  } else {
+    for (size_t i0 = 0; i0 < dst.extent(0); ++i0)
+      for (size_t i1 = 0; i1 < dst.extent(1); ++i1)
+        for (size_t i2 = 0; i2 < dst.extent(2); ++i2)
+          for (size_t i3 = 0; i3 < dst.extent(3); ++i3)
+            for (size_t i4 = 0; i4 < dst.extent(4); ++i4)
+              dst(i0, i1, i2, i3, i4) = src(i0, i1, i2, i3, i4);
+  }
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst, const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 6 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 6)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  if (dst.span_is_contiguous() && src.span_is_contiguous()) {
+    local_deep_copy_contiguous(dst, src);
+  } else {
+    for (size_t i0 = 0; i0 < dst.extent(0); ++i0)
+      for (size_t i1 = 0; i1 < dst.extent(1); ++i1)
+        for (size_t i2 = 0; i2 < dst.extent(2); ++i2)
+          for (size_t i3 = 0; i3 < dst.extent(3); ++i3)
+            for (size_t i4 = 0; i4 < dst.extent(4); ++i4)
+              for (size_t i5 = 0; i5 < dst.extent(5); ++i5)
+                dst(i0, i1, i2, i3, i4, i5) = src(i0, i1, i2, i3, i4, i5);
+  }
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP, class ST, class... SP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst, const View<ST, SP...>& src,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 7 &&
+                      unsigned(ViewTraits<ST, SP...>::rank) == 7)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  if (dst.span_is_contiguous() && src.span_is_contiguous()) {
+    local_deep_copy_contiguous(dst, src);
+  } else {
+    for (size_t i0 = 0; i0 < dst.extent(0); ++i0)
+      for (size_t i1 = 0; i1 < dst.extent(1); ++i1)
+        for (size_t i2 = 0; i2 < dst.extent(2); ++i2)
+          for (size_t i3 = 0; i3 < dst.extent(3); ++i3)
+            for (size_t i4 = 0; i4 < dst.extent(4); ++i4)
+              for (size_t i5 = 0; i5 < dst.extent(5); ++i5)
+                for (size_t i6 = 0; i6 < dst.extent(6); ++i6)
+                  dst(i0, i1, i2, i3, i4, i5, i6) =
+                      src(i0, i1, i2, i3, i4, i5, i6);
+  }
+}
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+/** \brief  Deep copy a value into a view.  */
+template <class TeamType, class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy_contiguous(
+    const TeamType& team, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
+                                  void>::value>* = nullptr) {
+  Kokkos::parallel_for(Kokkos::TeamVectorRange(team, dst.span()),
+                       [&](const int& i) { dst.data()[i] = value; });
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy_contiguous(
+    const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<std::is_same<typename ViewTraits<DT, DP...>::specialize,
+                                  void>::value>* = nullptr) {
+  for (size_t i = 0; i < dst.span(); ++i) {
+    dst.data()[i] = value;
+  }
+}
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 1)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0);
+
+  team.team_barrier();
+  Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N),
+                       [&](const int& i) { dst(i) = value; });
+  team.team_barrier();
+}
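+
+// Usage sketch (hypothetical view `dst2d`; `team` as in the signature above):
+// team-cooperative fill of one row with a scalar.
+//
+//   Kokkos::Experimental::local_deep_copy(
+//       team, Kokkos::subview(dst2d, team.league_rank(), Kokkos::ALL), 0.0);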
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 2)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0) * dst.extent(1);
+
+  if (dst.span_is_contiguous()) {
+    team.team_barrier();
+    local_deep_copy_contiguous(team, dst, value);
+    team.team_barrier();
+  } else {
+    team.team_barrier();
+    Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N), [&](const int& i) {
+      int i0      = i % dst.extent(0);
+      int i1      = i / dst.extent(0);
+      dst(i0, i1) = value;
+    });
+    team.team_barrier();
+  }
+}
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 3)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0) * dst.extent(1) * dst.extent(2);
+
+  if (dst.span_is_contiguous()) {
+    team.team_barrier();
+    local_deep_copy_contiguous(team, dst, value);
+    team.team_barrier();
+  } else {
+    team.team_barrier();
+    Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N), [&](const int& i) {
+      int i0          = i % dst.extent(0);
+      int itmp        = i / dst.extent(0);
+      int i1          = itmp % dst.extent(1);
+      int i2          = itmp / dst.extent(1);
+      dst(i0, i1, i2) = value;
+    });
+    team.team_barrier();
+  }
+}
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 4)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N =
+      dst.extent(0) * dst.extent(1) * dst.extent(2) * dst.extent(3);
+
+  if (dst.span_is_contiguous()) {
+    team.team_barrier();
+    local_deep_copy_contiguous(team, dst, value);
+    team.team_barrier();
+  } else {
+    team.team_barrier();
+    Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N), [&](const int& i) {
+      int i0              = i % dst.extent(0);
+      int itmp            = i / dst.extent(0);
+      int i1              = itmp % dst.extent(1);
+      itmp                = itmp / dst.extent(1);
+      int i2              = itmp % dst.extent(2);
+      int i3              = itmp / dst.extent(2);
+      dst(i0, i1, i2, i3) = value;
+    });
+    team.team_barrier();
+  }
+}
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 5)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0) * dst.extent(1) * dst.extent(2) *
+                   dst.extent(3) * dst.extent(4);
+
+  if (dst.span_is_contiguous()) {
+    team.team_barrier();
+    local_deep_copy_contiguous(team, dst, value);
+    team.team_barrier();
+  } else {
+    team.team_barrier();
+    Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N), [&](const int& i) {
+      int i0                  = i % dst.extent(0);
+      int itmp                = i / dst.extent(0);
+      int i1                  = itmp % dst.extent(1);
+      itmp                    = itmp / dst.extent(1);
+      int i2                  = itmp % dst.extent(2);
+      itmp                    = itmp / dst.extent(2);
+      int i3                  = itmp % dst.extent(3);
+      int i4                  = itmp / dst.extent(3);
+      dst(i0, i1, i2, i3, i4) = value;
+    });
+    team.team_barrier();
+  }
+}
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 6)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0) * dst.extent(1) * dst.extent(2) *
+                   dst.extent(3) * dst.extent(4) * dst.extent(5);
+
+  if (dst.span_is_contiguous()) {
+    team.team_barrier();
+    local_deep_copy_contiguous(team, dst, value);
+    team.team_barrier();
+  } else {
+    team.team_barrier();
+    Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N), [&](const int& i) {
+      int i0                      = i % dst.extent(0);
+      int itmp                    = i / dst.extent(0);
+      int i1                      = itmp % dst.extent(1);
+      itmp                        = itmp / dst.extent(1);
+      int i2                      = itmp % dst.extent(2);
+      itmp                        = itmp / dst.extent(2);
+      int i3                      = itmp % dst.extent(3);
+      itmp                        = itmp / dst.extent(3);
+      int i4                      = itmp % dst.extent(4);
+      int i5                      = itmp / dst.extent(4);
+      dst(i0, i1, i2, i3, i4, i5) = value;
+    });
+    team.team_barrier();
+  }
+}
+//----------------------------------------------------------------------------
+template <class TeamType, class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const TeamType& team, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 7)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0) * dst.extent(1) * dst.extent(2) *
+                   dst.extent(3) * dst.extent(4) * dst.extent(5) *
+                   dst.extent(6);
+
+  if (dst.span_is_contiguous()) {
+    team.team_barrier();
+    local_deep_copy_contiguous(team, dst, value);
+    team.team_barrier();
+  } else {
+    team.team_barrier();
+    Kokkos::parallel_for(Kokkos::TeamVectorRange(team, N), [&](const int& i) {
+      int i0                          = i % dst.extent(0);
+      int itmp                        = i / dst.extent(0);
+      int i1                          = itmp % dst.extent(1);
+      itmp                            = itmp / dst.extent(1);
+      int i2                          = itmp % dst.extent(2);
+      itmp                            = itmp / dst.extent(2);
+      int i3                          = itmp % dst.extent(3);
+      itmp                            = itmp / dst.extent(3);
+      int i4                          = itmp % dst.extent(4);
+      itmp                            = itmp / dst.extent(4);
+      int i5                          = itmp % dst.extent(5);
+      int i6                          = itmp / dst.extent(5);
+      dst(i0, i1, i2, i3, i4, i5, i6) = value;
+    });
+    team.team_barrier();
+  }
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 1)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  const size_t N = dst.extent(0);
+
+  for (size_t i = 0; i < N; ++i) {
+    dst(i) = value;
+  }
+}
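+
+// Usage sketch (hypothetical view `dst2d`, index `i`): sequential per-caller
+// fill, e.g. zeroing one row inside a RangePolicy kernel.
+//
+//   Kokkos::Experimental::local_deep_copy(
+//       Kokkos::subview(dst2d, i, Kokkos::ALL), 0.0);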
+//----------------------------------------------------------------------------
+template <class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 2)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  if (dst.span_is_contiguous()) {
+    local_deep_copy_contiguous(dst, value);
+  } else {
+    for (size_t i0 = 0; i0 < dst.extent(0); ++i0)
+      for (size_t i1 = 0; i1 < dst.extent(1); ++i1) dst(i0, i1) = value;
+  }
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 3)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  if (dst.span_is_contiguous()) {
+    local_deep_copy_contiguous(dst, value);
+  } else {
+    for (size_t i0 = 0; i0 < dst.extent(0); ++i0)
+      for (size_t i1 = 0; i1 < dst.extent(1); ++i1)
+        for (size_t i2 = 0; i2 < dst.extent(2); ++i2) dst(i0, i1, i2) = value;
+  }
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 4)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  if (dst.span_is_contiguous()) {
+    local_deep_copy_contiguous(dst, value);
+  } else {
+    for (size_t i0 = 0; i0 < dst.extent(0); ++i0)
+      for (size_t i1 = 0; i1 < dst.extent(1); ++i1)
+        for (size_t i2 = 0; i2 < dst.extent(2); ++i2)
+          for (size_t i3 = 0; i3 < dst.extent(3); ++i3)
+            dst(i0, i1, i2, i3) = value;
+  }
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 5)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  if (dst.span_is_contiguous()) {
+    local_deep_copy_contiguous(dst, value);
+  } else {
+    for (size_t i0 = 0; i0 < dst.extent(0); ++i0)
+      for (size_t i1 = 0; i1 < dst.extent(1); ++i1)
+        for (size_t i2 = 0; i2 < dst.extent(2); ++i2)
+          for (size_t i3 = 0; i3 < dst.extent(3); ++i3)
+            for (size_t i4 = 0; i4 < dst.extent(4); ++i4)
+              dst(i0, i1, i2, i3, i4) = value;
+  }
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 6)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  if (dst.span_is_contiguous()) {
+    local_deep_copy_contiguous(dst, value);
+  } else {
+    for (size_t i0 = 0; i0 < dst.extent(0); ++i0)
+      for (size_t i1 = 0; i1 < dst.extent(1); ++i1)
+        for (size_t i2 = 0; i2 < dst.extent(2); ++i2)
+          for (size_t i3 = 0; i3 < dst.extent(3); ++i3)
+            for (size_t i4 = 0; i4 < dst.extent(4); ++i4)
+              for (size_t i5 = 0; i5 < dst.extent(5); ++i5)
+                dst(i0, i1, i2, i3, i4, i5) = value;
+  }
+}
+//----------------------------------------------------------------------------
+template <class DT, class... DP>
+void KOKKOS_INLINE_FUNCTION local_deep_copy(
+    const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<(unsigned(ViewTraits<DT, DP...>::rank) == 7)>* = nullptr) {
+  if (dst.data() == nullptr) {
+    return;
+  }
+
+  if (dst.span_is_contiguous()) {
+    local_deep_copy_contiguous(dst, value);
+  } else {
+    for (size_t i0 = 0; i0 < dst.extent(0); ++i0)
+      for (size_t i1 = 0; i1 < dst.extent(1); ++i1)
+        for (size_t i2 = 0; i2 < dst.extent(2); ++i2)
+          for (size_t i3 = 0; i3 < dst.extent(3); ++i3)
+            for (size_t i4 = 0; i4 < dst.extent(4); ++i4)
+              for (size_t i5 = 0; i5 < dst.extent(5); ++i5)
+                for (size_t i6 = 0; i6 < dst.extent(6); ++i6)
+                  dst(i0, i1, i2, i3, i4, i5, i6) = value;
+  }
+}
+} /* namespace Experimental */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+/** \brief  Deep copy a value from Host memory into a view. ExecSpace can access
+ * dst. */
+template <class ExecSpace, class DT, class... DP>
+inline void deep_copy(
+    const ExecSpace& space, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<
+        Kokkos::is_execution_space<ExecSpace>::value &&
+        std::is_void<typename ViewTraits<DT, DP...>::specialize>::value &&
+        Kokkos::SpaceAccessibility<ExecSpace, typename ViewTraits<DT, DP...>::
+                                                  memory_space>::accessible>* =
+        nullptr) {
+  using dst_traits = ViewTraits<DT, DP...>;
+  static_assert(std::is_same<typename dst_traits::non_const_value_type,
+                             typename dst_traits::value_type>::value,
+                "deep_copy requires non-const type");
+  using dst_memory_space = typename dst_traits::memory_space;
+  if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
+    Kokkos::Profiling::beginDeepCopy(
+        Kokkos::Profiling::make_space_handle(dst_memory_space::name()),
+        dst.label(), dst.data(),
+        Kokkos::Profiling::make_space_handle(Kokkos::HostSpace::name()),
+        "(none)", &value, dst.span() * sizeof(typename dst_traits::value_type));
+  }
+  if (dst.data() == nullptr) {
+    space.fence("Kokkos::deep_copy: scalar copy on space, dst data is null");
+  } else if (dst.span_is_contiguous()) {
+    Impl::contiguous_fill_or_memset(space, dst, value);
+  } else {
+    using ViewType = View<DT, DP...>;
+    // Figure out iteration order to do the ViewFill
+    int64_t strides[ViewType::Rank + 1];
+    dst.stride(strides);
+    Kokkos::Iterate iterate;
+    if (std::is_same<typename ViewType::array_layout,
+                     Kokkos::LayoutRight>::value) {
+      iterate = Kokkos::Iterate::Right;
+    } else if (std::is_same<typename ViewType::array_layout,
+                            Kokkos::LayoutLeft>::value) {
+      iterate = Kokkos::Iterate::Left;
+    } else if (std::is_same<typename ViewType::array_layout,
+                            Kokkos::LayoutStride>::value) {
+      if (strides[0] > strides[ViewType::Rank > 0 ? ViewType::Rank - 1 : 0])
+        iterate = Kokkos::Iterate::Right;
+      else
+        iterate = Kokkos::Iterate::Left;
+    } else {
+      if (std::is_same<typename ViewType::execution_space::array_layout,
+                       Kokkos::LayoutRight>::value)
+        iterate = Kokkos::Iterate::Right;
+      else
+        iterate = Kokkos::Iterate::Left;
+    }
+
+    // Let's call the right ViewFill functor based on the integer index type
+    // needed and the iteration order
+    using ViewTypeUniform =
+        std::conditional_t<ViewType::Rank == 0,
+                           typename ViewType::uniform_runtime_type,
+                           typename ViewType::uniform_runtime_nomemspace_type>;
+    if (dst.span() > static_cast<size_t>(std::numeric_limits<int32_t>::max())) {
+      if (iterate == Kokkos::Iterate::Right)
+        Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutRight, ExecSpace,
+                               ViewType::Rank, int64_t>(dst, value, space);
+      else
+        Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutLeft, ExecSpace,
+                               ViewType::Rank, int64_t>(dst, value, space);
+    } else {
+      if (iterate == Kokkos::Iterate::Right)
+        Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutRight, ExecSpace,
+                               ViewType::Rank, int32_t>(dst, value, space);
+      else
+        Kokkos::Impl::ViewFill<ViewTypeUniform, Kokkos::LayoutLeft, ExecSpace,
+                               ViewType::Rank, int32_t>(dst, value, space);
+    }
+  }
+  if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+    Kokkos::Profiling::endDeepCopy();
+  }
+}
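+
+// Usage sketch (hypothetical names): asynchronous fill on an execution space
+// instance; this overload does not fence, so synchronize before reading.
+//
+//   Kokkos::DefaultExecutionSpace space;
+//   Kokkos::View<double*> a("a", 100);
+//   Kokkos::deep_copy(space, a, 1.0);  // enqueued on `space`
+//   space.fence();                     // wait before host access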
+
+/** \brief  Deep copy a value from Host memory into a view. ExecSpace cannot
+ * access dst. */
+template <class ExecSpace, class DT, class... DP>
+inline void deep_copy(
+    const ExecSpace& space, const View<DT, DP...>& dst,
+    typename ViewTraits<DT, DP...>::const_value_type& value,
+    std::enable_if_t<
+        Kokkos::is_execution_space<ExecSpace>::value &&
+        std::is_void<typename ViewTraits<DT, DP...>::specialize>::value &&
+        !Kokkos::SpaceAccessibility<ExecSpace, typename ViewTraits<DT, DP...>::
+                                                   memory_space>::accessible>* =
+        nullptr) {
+  using dst_traits = ViewTraits<DT, DP...>;
+  static_assert(std::is_same<typename dst_traits::non_const_value_type,
+                             typename dst_traits::value_type>::value,
+                "deep_copy requires non-const type");
+  using dst_memory_space = typename dst_traits::memory_space;
+  if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
+    Kokkos::Profiling::beginDeepCopy(
+        Kokkos::Profiling::make_space_handle(dst_memory_space::name()),
+        dst.label(), dst.data(),
+        Kokkos::Profiling::make_space_handle(Kokkos::HostSpace::name()),
+        "(none)", &value, dst.span() * sizeof(typename dst_traits::value_type));
+  }
+  if (dst.data() == nullptr) {
+    space.fence(
+        "Kokkos::deep_copy: scalar-to-view copy on space, dst data is null");
+  } else {
+    space.fence("Kokkos::deep_copy: scalar-to-view copy on space, pre copy");
+    using fill_exec_space = typename dst_traits::memory_space::execution_space;
+    if (dst.span_is_contiguous()) {
+      Impl::contiguous_fill_or_memset(fill_exec_space(), dst, value);
+    } else {
+      using ViewTypeUniform = std::conditional_t<
+          View<DT, DP...>::Rank == 0,
+          typename View<DT, DP...>::uniform_runtime_type,
+          typename View<DT, DP...>::uniform_runtime_nomemspace_type>;
+      Kokkos::Impl::ViewFill<ViewTypeUniform, typename dst_traits::array_layout,
+                             fill_exec_space>(dst, value, fill_exec_space());
+    }
+    fill_exec_space().fence(
+        "Kokkos::deep_copy: scalar-to-view copy on space, fence after fill");
+  }
+  if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+    Kokkos::Profiling::endDeepCopy();
+  }
+}
+
+/** \brief  Deep copy into a value in Host memory from a view.  */
+template <class ExecSpace, class ST, class... SP>
+inline void deep_copy(
+    const ExecSpace& exec_space,
+    typename ViewTraits<ST, SP...>::non_const_value_type& dst,
+    const View<ST, SP...>& src,
+    std::enable_if_t<Kokkos::is_execution_space<ExecSpace>::value &&
+                     std::is_same<typename ViewTraits<ST, SP...>::specialize,
+                                  void>::value>* = nullptr) {
+  using src_traits       = ViewTraits<ST, SP...>;
+  using src_memory_space = typename src_traits::memory_space;
+  static_assert(src_traits::rank == 0,
+                "ERROR: Non-rank-zero view in deep_copy( value , View )");
+  if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
+    Kokkos::Profiling::beginDeepCopy(
+        Kokkos::Profiling::make_space_handle(Kokkos::HostSpace::name()),
+        "(none)", &dst,
+        Kokkos::Profiling::make_space_handle(src_memory_space::name()),
+        src.label(), src.data(), sizeof(ST));
+  }
+
+  if (src.data() == nullptr) {
+    exec_space.fence(
+        "Kokkos::deep_copy: view-to-scalar copy on space, src data is null");
+    if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+      Kokkos::Profiling::endDeepCopy();
+    }
+    return;
+  }
+
+  Kokkos::Impl::DeepCopy<HostSpace, src_memory_space, ExecSpace>(
+      exec_space, &dst, src.data(), sizeof(ST));
+  if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+    Kokkos::Profiling::endDeepCopy();
+  }
+}
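+
+// Usage sketch (assumes an execution space instance `space` and a rank-0
+// view `r` as above): copy a device value into a host scalar on `space`.
+//
+//   double value = 0;
+//   Kokkos::deep_copy(space, value, r);  // enqueued on `space`
+//   space.fence();                       // fence before using `value`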
+
+//----------------------------------------------------------------------------
+/** \brief  A deep copy between views of compatible type, and rank zero.  */
+template <class ExecSpace, class DT, class... DP, class ST, class... SP>
+inline void deep_copy(
+    const ExecSpace& exec_space, const View<DT, DP...>& dst,
+    const View<ST, SP...>& src,
+    std::enable_if_t<
+        (Kokkos::is_execution_space<ExecSpace>::value &&
+         std::is_void<typename ViewTraits<DT, DP...>::specialize>::value &&
+         std::is_void<typename ViewTraits<ST, SP...>::specialize>::value &&
+         (unsigned(ViewTraits<DT, DP...>::rank) == unsigned(0) &&
+          unsigned(ViewTraits<ST, SP...>::rank) == unsigned(0)))>* = nullptr) {
+  using src_traits = ViewTraits<ST, SP...>;
+  using dst_traits = ViewTraits<DT, DP...>;
+
+  using src_memory_space = typename src_traits::memory_space;
+  using dst_memory_space = typename dst_traits::memory_space;
+  static_assert(std::is_same<typename dst_traits::value_type,
+                             typename src_traits::non_const_value_type>::value,
+                "deep_copy requires matching non-const destination type");
+
+  if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
+    Kokkos::Profiling::beginDeepCopy(
+        Kokkos::Profiling::make_space_handle(dst_memory_space::name()),
+        dst.label(), dst.data(),
+        Kokkos::Profiling::make_space_handle(src_memory_space::name()),
+        src.label(), src.data(), sizeof(DT));
+  }
+
+  if (dst.data() == nullptr && src.data() == nullptr) {
+    exec_space.fence(
+        "Kokkos::deep_copy: view-to-view copy on space, data is null");
+    if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+      Kokkos::Profiling::endDeepCopy();
+    }
+    return;
+  }
+
+  if (dst.data() != src.data()) {
+    Kokkos::Impl::DeepCopy<dst_memory_space, src_memory_space, ExecSpace>(
+        exec_space, dst.data(), src.data(),
+        sizeof(typename dst_traits::value_type));
+  }
+  if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+    Kokkos::Profiling::endDeepCopy();
+  }
+}
+
+//----------------------------------------------------------------------------
+/** \brief  A deep copy between views of the default specialization, compatible
+ * type, same non-zero rank
+ */
+template <class ExecSpace, class DT, class... DP, class ST, class... SP>
+inline void deep_copy(
+    const ExecSpace& exec_space, const View<DT, DP...>& dst,
+    const View<ST, SP...>& src,
+    std::enable_if_t<
+        (Kokkos::is_execution_space<ExecSpace>::value &&
+         std::is_void<typename ViewTraits<DT, DP...>::specialize>::value &&
+         std::is_void<typename ViewTraits<ST, SP...>::specialize>::value &&
+         (unsigned(ViewTraits<DT, DP...>::rank) != 0 ||
+          unsigned(ViewTraits<ST, SP...>::rank) != 0))>* = nullptr) {
+  using dst_type = View<DT, DP...>;
+  using src_type = View<ST, SP...>;
+
+  static_assert(std::is_same<typename dst_type::value_type,
+                             typename dst_type::non_const_value_type>::value,
+                "deep_copy requires non-const destination type");
+
+  static_assert((unsigned(dst_type::rank) == unsigned(src_type::rank)),
+                "deep_copy requires Views of equal rank");
+
+  using dst_execution_space = typename dst_type::execution_space;
+  using src_execution_space = typename src_type::execution_space;
+  using dst_memory_space    = typename dst_type::memory_space;
+  using src_memory_space    = typename src_type::memory_space;
+  using dst_value_type      = typename dst_type::value_type;
+  using src_value_type      = typename src_type::value_type;
+
+  if (Kokkos::Tools::Experimental::get_callbacks().begin_deep_copy != nullptr) {
+    Kokkos::Profiling::beginDeepCopy(
+        Kokkos::Profiling::make_space_handle(dst_memory_space::name()),
+        dst.label(), dst.data(),
+        Kokkos::Profiling::make_space_handle(src_memory_space::name()),
+        src.label(), src.data(), dst.span() * sizeof(dst_value_type));
+  }
+
+  dst_value_type* dst_start = dst.data();
+  dst_value_type* dst_end   = dst.data() + dst.span();
+  src_value_type* src_start = src.data();
+  src_value_type* src_end   = src.data() + src.span();
+
+  // Early exit if either pointer is null or the ranges are identical
+  if ((dst_start == nullptr || src_start == nullptr) ||
+      ((std::ptrdiff_t(dst_start) == std::ptrdiff_t(src_start)) &&
+       (std::ptrdiff_t(dst_end) == std::ptrdiff_t(src_end)))) {
+    // throw if dimension mismatch
+    if ((src.extent(0) != dst.extent(0)) || (src.extent(1) != dst.extent(1)) ||
+        (src.extent(2) != dst.extent(2)) || (src.extent(3) != dst.extent(3)) ||
+        (src.extent(4) != dst.extent(4)) || (src.extent(5) != dst.extent(5)) ||
+        (src.extent(6) != dst.extent(6)) || (src.extent(7) != dst.extent(7))) {
+      std::string message(
+          "Deprecation Error: Kokkos::deep_copy extents of views don't "
+          "match: ");
+      message += dst.label();
+      message += "(";
+      for (int r = 0; r < dst_type::Rank - 1; r++) {
+        message += std::to_string(dst.extent(r));
+        message += ",";
+      }
+      message += std::to_string(dst.extent(dst_type::Rank - 1));
+      message += ") ";
+      message += src.label();
+      message += "(";
+      for (int r = 0; r < src_type::Rank - 1; r++) {
+        message += std::to_string(src.extent(r));
+        message += ",";
+      }
+      message += std::to_string(src.extent(src_type::Rank - 1));
+      message += ") ";
+
+      Kokkos::Impl::throw_runtime_exception(message);
+    }
+    if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+      Kokkos::Profiling::endDeepCopy();
+    }
+    return;
+  }
+
+  enum {
+    ExecCanAccessSrcDst =
+        Kokkos::SpaceAccessibility<ExecSpace, dst_memory_space>::accessible &&
+        Kokkos::SpaceAccessibility<ExecSpace, src_memory_space>::accessible
+  };
+  enum {
+    DstExecCanAccessSrc =
+        Kokkos::SpaceAccessibility<dst_execution_space,
+                                   src_memory_space>::accessible
+  };
+
+  enum {
+    SrcExecCanAccessDst =
+        Kokkos::SpaceAccessibility<src_execution_space,
+                                   dst_memory_space>::accessible
+  };
+
+  // Error out for non-identical overlapping views.
+  if ((((std::ptrdiff_t)dst_start < (std::ptrdiff_t)src_end) &&
+       ((std::ptrdiff_t)dst_end > (std::ptrdiff_t)src_start)) &&
+      ((dst.span_is_contiguous() && src.span_is_contiguous()))) {
+    std::string message("Error: Kokkos::deep_copy of overlapping views: ");
+    message += dst.label();
+    message += "(";
+    message += std::to_string((std::ptrdiff_t)dst_start);
+    message += ",";
+    message += std::to_string((std::ptrdiff_t)dst_end);
+    message += ") ";
+    message += src.label();
+    message += "(";
+    message += std::to_string((std::ptrdiff_t)src_start);
+    message += ",";
+    message += std::to_string((std::ptrdiff_t)src_end);
+    message += ") ";
+    Kokkos::Impl::throw_runtime_exception(message);
+  }
+
+  // Check for same extents
+  if ((src.extent(0) != dst.extent(0)) || (src.extent(1) != dst.extent(1)) ||
+      (src.extent(2) != dst.extent(2)) || (src.extent(3) != dst.extent(3)) ||
+      (src.extent(4) != dst.extent(4)) || (src.extent(5) != dst.extent(5)) ||
+      (src.extent(6) != dst.extent(6)) || (src.extent(7) != dst.extent(7))) {
+    std::string message(
+        "Deprecation Error: Kokkos::deep_copy extents of views don't match: ");
+    message += dst.label();
+    message += "(";
+    for (int r = 0; r < dst_type::Rank - 1; r++) {
+      message += std::to_string(dst.extent(r));
+      message += ",";
+    }
+    message += std::to_string(dst.extent(dst_type::Rank - 1));
+    message += ") ";
+    message += src.label();
+    message += "(";
+    for (int r = 0; r < src_type::Rank - 1; r++) {
+      message += std::to_string(src.extent(r));
+      message += ",";
+    }
+    message += std::to_string(src.extent(src_type::Rank - 1));
+    message += ") ";
+
+    Kokkos::Impl::throw_runtime_exception(message);
+  }
+
+  // If same type, equal layout, equal dimensions, equal span, and contiguous
+  // memory then can byte-wise copy
+
+  if (std::is_same<typename dst_type::value_type,
+                   typename src_type::non_const_value_type>::value &&
+      (std::is_same<typename dst_type::array_layout,
+                    typename src_type::array_layout>::value ||
+       (dst_type::rank == 1 && src_type::rank == 1)) &&
+      dst.span_is_contiguous() && src.span_is_contiguous() &&
+      ((dst_type::rank < 1) || (dst.stride_0() == src.stride_0())) &&
+      ((dst_type::rank < 2) || (dst.stride_1() == src.stride_1())) &&
+      ((dst_type::rank < 3) || (dst.stride_2() == src.stride_2())) &&
+      ((dst_type::rank < 4) || (dst.stride_3() == src.stride_3())) &&
+      ((dst_type::rank < 5) || (dst.stride_4() == src.stride_4())) &&
+      ((dst_type::rank < 6) || (dst.stride_5() == src.stride_5())) &&
+      ((dst_type::rank < 7) || (dst.stride_6() == src.stride_6())) &&
+      ((dst_type::rank < 8) || (dst.stride_7() == src.stride_7()))) {
+    const size_t nbytes = sizeof(typename dst_type::value_type) * dst.span();
+    if ((void*)dst.data() != (void*)src.data()) {
+      Kokkos::Impl::DeepCopy<dst_memory_space, src_memory_space, ExecSpace>(
+          exec_space, dst.data(), src.data(), nbytes);
+    }
+  } else {
+    // Copying between views in accessible memory spaces with either
+    // non-contiguous spans or incompatible shapes.
+    if (ExecCanAccessSrcDst) {
+      Impl::view_copy(exec_space, dst, src);
+    } else if (DstExecCanAccessSrc || SrcExecCanAccessDst) {
+      using cpy_exec_space =
+          std::conditional_t<DstExecCanAccessSrc, dst_execution_space,
+                             src_execution_space>;
+      exec_space.fence(
+          "Kokkos::deep_copy: view-to-view noncontiguous copy on space, pre "
+          "copy");
+      Impl::view_copy(cpy_exec_space(), dst, src);
+      cpy_exec_space().fence(
+          "Kokkos::deep_copy: view-to-view noncontiguous copy on space, post "
+          "copy");
+    } else {
+      Kokkos::Impl::throw_runtime_exception(
+          "deep_copy given views that would require a temporary allocation");
+    }
+  }
+  if (Kokkos::Tools::Experimental::get_callbacks().end_deep_copy != nullptr) {
+    Kokkos::Profiling::endDeepCopy();
+  }
+}
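+
+// [Editor's note] Illustrative usage sketch, not part of the bundled sources.
+// A typical host/device round trip exercising the overload above; the
+// instance form merely enqueues the copy on that execution space:
+//
+//   Kokkos::View<double*> d("d", 100);
+//   auto h = Kokkos::create_mirror_view(d);
+//   Kokkos::deep_copy(h, d);  // synchronous: fences before and after
+//   Kokkos::deep_copy(Kokkos::DefaultExecutionSpace{}, h, d);  // async variant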
+
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+template <typename ViewType>
+bool size_mismatch(const ViewType& view, unsigned int max_extent,
+                   const size_t new_extents[8]) {
+  for (unsigned int dim = 0; dim < max_extent; ++dim)
+    if (new_extents[dim] != view.extent(dim)) {
+      return true;
+    }
+  for (unsigned int dim = max_extent; dim < 8; ++dim)
+    if (new_extents[dim] != KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+      return true;
+    }
+  return false;
+}
+
+}  // namespace Impl
+
+/** \brief  Resize a view, copying old data to the new view at the
+ * corresponding indices. */
+template <class T, class... P, class... ViewCtorArgs>
+inline typename std::enable_if<
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutLeft>::value ||
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutRight>::value>::type
+impl_resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+            Kokkos::View<T, P...>& v, const size_t n0, const size_t n1,
+            const size_t n2, const size_t n3, const size_t n4, const size_t n5,
+            const size_t n6, const size_t n7) {
+  using view_type        = Kokkos::View<T, P...>;
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(Kokkos::ViewTraits<T, P...>::is_managed,
+                "Can only resize managed views");
+  static_assert(!alloc_prop_input::has_label,
+                "The view constructor arguments passed to Kokkos::resize "
+                "must not include a label!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to Kokkos::resize must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::has_memory_space,
+                "The view constructor arguments passed to Kokkos::resize must "
+                "not include a memory space instance!");
+
+  // TODO (mfh 27 Jun 2017) If the old View has enough space but just
+  // different dimensions (e.g., if the product of the dimensions,
+  // including extra space for alignment, will not change), then
+  // consider just reusing storage.  For now, Kokkos always
+  // reallocates if any of the dimensions change, even if the old View
+  // has enough space.
+
+  const size_t new_extents[8] = {n0, n1, n2, n3, n4, n5, n6, n7};
+  const bool sizeMismatch = Impl::size_mismatch(v, v.rank_dynamic, new_extents);
+
+  if (sizeMismatch) {
+    // Add execution space here to avoid the need for if constexpr below
+    using alloc_prop = Impl::ViewCtorProp<
+        ViewCtorArgs..., std::string,
+        std::conditional_t<alloc_prop_input::has_execution_space,
+                           std::integral_constant<unsigned int, 10>,
+                           typename view_type::execution_space>>;
+    alloc_prop prop_copy(arg_prop);
+    static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
+        v.label();
+
+    view_type v_resized(prop_copy, n0, n1, n2, n3, n4, n5, n6, n7);
+
+    if (alloc_prop_input::has_execution_space)
+      Kokkos::Impl::ViewRemap<view_type, view_type>(
+          v_resized, v,
+          static_cast<const Impl::ViewCtorProp<
+              void, typename alloc_prop::execution_space>&>(prop_copy)
+              .value);
+    else {
+      Kokkos::Impl::ViewRemap<view_type, view_type>(v_resized, v);
+      Kokkos::fence("Kokkos::resize(View)");
+    }
+
+    v = v_resized;
+  }
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutLeft>::value ||
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutRight>::value>
+resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+       Kokkos::View<T, P...>& v, const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+  impl_resize(arg_prop, v, n0, n1, n2, n3, n4, n5, n6, n7);
+}
+
+template <class T, class... P>
+inline std::enable_if_t<
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutLeft>::value ||
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutRight>::value>
+resize(Kokkos::View<T, P...>& v, const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+  impl_resize(Impl::ViewCtorProp<>{}, v, n0, n1, n2, n3, n4, n5, n6, n7);
+}
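+
+// [Editor's note] Illustrative usage sketch, not part of the bundled sources:
+//
+//   Kokkos::View<int**> a("a", 10, 10);
+//   Kokkos::resize(a, 20, 10);  // reallocates; a(i,j) with i < 10 is preserved
+//   Kokkos::resize(a, 20, 10);  // extents unchanged: no reallocation
+//   Kokkos::resize(Kokkos::WithoutInitializing, a, 40, 10);  // elements beyond
+//                                          // the copied range stay uninitialized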
+
+template <class I, class T, class... P>
+inline std::enable_if_t<
+    (Impl::is_view_ctor_property<I>::value ||
+     Kokkos::is_execution_space<I>::value) &&
+    (std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                  Kokkos::LayoutLeft>::value ||
+     std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                  Kokkos::LayoutRight>::value)>
+resize(const I& arg_prop, Kokkos::View<T, P...>& v,
+       const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+       const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+  impl_resize(Kokkos::view_alloc(arg_prop), v, n0, n1, n2, n3, n4, n5, n6, n7);
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutLeft>::value ||
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutRight>::value ||
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutStride>::value ||
+    is_layouttiled<typename Kokkos::View<T, P...>::array_layout>::value>
+impl_resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+            Kokkos::View<T, P...>& v,
+            const typename Kokkos::View<T, P...>::array_layout& layout) {
+  using view_type        = Kokkos::View<T, P...>;
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(Kokkos::ViewTraits<T, P...>::is_managed,
+                "Can only resize managed views");
+  static_assert(!alloc_prop_input::has_label,
+                "The view constructor arguments passed to Kokkos::resize "
+                "must not include a label!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to Kokkos::resize must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::has_memory_space,
+                "The view constructor arguments passed to Kokkos::resize must "
+                "not include a memory space instance!");
+
+  if (v.layout() != layout) {
+    // Add execution space here to avoid the need for if constexpr below
+    using alloc_prop = Impl::ViewCtorProp<
+        ViewCtorArgs..., std::string,
+        std::conditional_t<alloc_prop_input::has_execution_space,
+                           std::integral_constant<unsigned int, 10>,
+                           typename view_type::execution_space>>;
+    alloc_prop prop_copy(arg_prop);
+    static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
+        v.label();
+
+    view_type v_resized(prop_copy, layout);
+
+    if (alloc_prop_input::has_execution_space)
+      Kokkos::Impl::ViewRemap<view_type, view_type>(
+          v_resized, v,
+          static_cast<const Impl::ViewCtorProp<
+              void, typename alloc_prop::execution_space>&>(prop_copy)
+              .value);
+    else {
+      Kokkos::Impl::ViewRemap<view_type, view_type>(v_resized, v);
+      Kokkos::fence("Kokkos::resize(View)");
+    }
+
+    v = v_resized;
+  }
+}
+
+// FIXME User-provided (custom) layouts are not required to have a comparison
+// operator. Hence, there is no way to check if the requested layout is actually
+// the same as the existing one.
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    !(std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                   Kokkos::LayoutLeft>::value ||
+      std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                   Kokkos::LayoutRight>::value ||
+      std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                   Kokkos::LayoutStride>::value ||
+      is_layouttiled<typename Kokkos::View<T, P...>::array_layout>::value)>
+impl_resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+            Kokkos::View<T, P...>& v,
+            const typename Kokkos::View<T, P...>::array_layout& layout) {
+  using view_type        = Kokkos::View<T, P...>;
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(Kokkos::ViewTraits<T, P...>::is_managed,
+                "Can only resize managed views");
+  static_assert(!alloc_prop_input::has_label,
+                "The view constructor arguments passed to Kokkos::resize "
+                "must not include a label!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to Kokkos::resize must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::has_memory_space,
+                "The view constructor arguments passed to Kokkos::resize must "
+                "not include a memory space instance!");
+
+  // Add execution space here to avoid the need for if constexpr below
+  using alloc_prop = Impl::ViewCtorProp<
+      ViewCtorArgs..., std::string,
+      std::conditional_t<alloc_prop_input::has_execution_space,
+                         std::integral_constant<unsigned int, 10>,
+                         typename view_type::execution_space>>;
+  alloc_prop prop_copy(arg_prop);
+  static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
+      v.label();
+
+  view_type v_resized(prop_copy, layout);
+
+  if (alloc_prop_input::has_execution_space)
+    Kokkos::Impl::ViewRemap<view_type, view_type>(
+        v_resized, v,
+        static_cast<const Impl::ViewCtorProp<
+            void, typename alloc_prop::execution_space>&>(prop_copy)
+            .value);
+  else {
+    Kokkos::Impl::ViewRemap<view_type, view_type>(v_resized, v);
+    Kokkos::fence("Kokkos::resize(View)");
+  }
+
+  v = v_resized;
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline void resize(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+                   Kokkos::View<T, P...>& v,
+                   const typename Kokkos::View<T, P...>::array_layout& layout) {
+  impl_resize(arg_prop, v, layout);
+}
+
+template <class I, class T, class... P>
+inline std::enable_if_t<Impl::is_view_ctor_property<I>::value ||
+                        Kokkos::is_execution_space<I>::value>
+resize(const I& arg_prop, Kokkos::View<T, P...>& v,
+       const typename Kokkos::View<T, P...>::array_layout& layout) {
+  impl_resize(Kokkos::view_alloc(arg_prop), v, layout);
+}
+
+template <class ExecutionSpace, class T, class... P>
+inline void resize(const ExecutionSpace& exec_space, Kokkos::View<T, P...>& v,
+                   const typename Kokkos::View<T, P...>::array_layout& layout) {
+  impl_resize(Kokkos::view_alloc(exec_space), v, layout);
+}
+
+template <class T, class... P>
+inline void resize(Kokkos::View<T, P...>& v,
+                   const typename Kokkos::View<T, P...>::array_layout& layout) {
+  impl_resize(Impl::ViewCtorProp<>{}, v, layout);
+}
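+
+// [Editor's note] Illustrative usage sketch, not part of the bundled sources.
+// The layout overloads cover cases where extents alone do not determine the
+// index mapping, e.g. LayoutStride:
+//
+//   Kokkos::LayoutStride layout(4, 1, 3, 4);  // 4x3, unit stride in dim 0
+//   Kokkos::View<double**, Kokkos::LayoutStride> s("s", layout);
+//   Kokkos::resize(s, Kokkos::LayoutStride(8, 1, 3, 8));  // grow dim 0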
+
+/** \brief  Resize a view, discarding its old data. */
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutLeft>::value ||
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutRight>::value>
+impl_realloc(Kokkos::View<T, P...>& v, const size_t n0, const size_t n1,
+             const size_t n2, const size_t n3, const size_t n4, const size_t n5,
+             const size_t n6, const size_t n7,
+             const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  using view_type        = Kokkos::View<T, P...>;
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(Kokkos::ViewTraits<T, P...>::is_managed,
+                "Can only realloc managed views");
+  static_assert(!alloc_prop_input::has_label,
+                "The view constructor arguments passed to Kokkos::realloc must "
+                "not include a label!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to Kokkos::realloc must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::has_memory_space,
+                "The view constructor arguments passed to Kokkos::realloc must "
+                "not include a memory space instance!");
+
+  const size_t new_extents[8] = {n0, n1, n2, n3, n4, n5, n6, n7};
+  const bool sizeMismatch = Impl::size_mismatch(v, v.rank_dynamic, new_extents);
+
+  if (sizeMismatch) {
+    using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
+    alloc_prop arg_prop_copy(arg_prop);
+    static_cast<Kokkos::Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy)
+        .value = v.label();
+    v = view_type();  // Best effort to deallocate in case no other view refers
+                      // to the shared allocation
+    v = view_type(arg_prop_copy, n0, n1, n2, n3, n4, n5, n6, n7);
+  } else if (alloc_prop_input::initialize) {
+    if (alloc_prop_input::has_execution_space) {
+      using alloc_prop = Impl::ViewCtorProp<
+          ViewCtorArgs...,
+          std::conditional_t<alloc_prop_input::has_execution_space,
+                             std::integral_constant<unsigned int, 2>,
+                             typename view_type::execution_space>>;
+      alloc_prop arg_prop_copy(arg_prop);
+      auto const& exec_space = static_cast<Kokkos::Impl::ViewCtorProp<
+          void, typename alloc_prop::execution_space> const&>(arg_prop_copy)
+                                   .value;
+      Kokkos::deep_copy(exec_space, v, typename view_type::value_type{});
+    } else
+      Kokkos::deep_copy(v, typename view_type::value_type{});
+  }
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutLeft>::value ||
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutRight>::value>
+realloc(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+        Kokkos::View<T, P...>& v,
+        const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+  impl_realloc(v, n0, n1, n2, n3, n4, n5, n6, n7, arg_prop);
+}
+
+template <class T, class... P>
+inline std::enable_if_t<
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutLeft>::value ||
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutRight>::value>
+realloc(Kokkos::View<T, P...>& v,
+        const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+  impl_realloc(v, n0, n1, n2, n3, n4, n5, n6, n7, Impl::ViewCtorProp<>{});
+}
+
+template <class I, class T, class... P>
+inline std::enable_if_t<
+    Impl::is_view_ctor_property<I>::value &&
+    (std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                  Kokkos::LayoutLeft>::value ||
+     std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                  Kokkos::LayoutRight>::value)>
+realloc(const I& arg_prop, Kokkos::View<T, P...>& v,
+        const size_t n0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+        const size_t n7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG) {
+  impl_realloc(v, n0, n1, n2, n3, n4, n5, n6, n7, Kokkos::view_alloc(arg_prop));
+}
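+
+// [Editor's note] Illustrative usage sketch, not part of the bundled sources.
+// In contrast to resize, realloc never copies the old contents:
+//
+//   Kokkos::View<double*> w("w", 100);
+//   Kokkos::realloc(w, 500);  // old data discarded; new storage zero-filled
+//   Kokkos::realloc(Kokkos::WithoutInitializing, w, 50);  // skip the fill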
+
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutLeft>::value ||
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutRight>::value ||
+    std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                 Kokkos::LayoutStride>::value ||
+    is_layouttiled<typename Kokkos::View<T, P...>::array_layout>::value>
+impl_realloc(Kokkos::View<T, P...>& v,
+             const typename Kokkos::View<T, P...>::array_layout& layout,
+             const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  using view_type        = Kokkos::View<T, P...>;
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(Kokkos::ViewTraits<T, P...>::is_managed,
+                "Can only realloc managed views");
+  static_assert(!alloc_prop_input::has_label,
+                "The view constructor arguments passed to Kokkos::realloc must "
+                "not include a label!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to Kokkos::realloc must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::has_memory_space,
+                "The view constructor arguments passed to Kokkos::realloc must "
+                "not include a memory space instance!");
+
+  if (v.layout() != layout) {
+    using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
+    alloc_prop arg_prop_copy(arg_prop);
+    static_cast<Kokkos::Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy)
+        .value = v.label();  // capture the label before deallocating
+    v = view_type();  // Deallocate first, if this is the only view to the
+                      // allocation
+    v = view_type(arg_prop_copy, layout);
+  } else if (alloc_prop_input::initialize) {
+    if (alloc_prop_input::has_execution_space) {
+      // Add execution_space if not provided to avoid need for if constexpr
+      using alloc_prop = Impl::ViewCtorProp<
+          ViewCtorArgs...,
+          std::conditional_t<alloc_prop_input::has_execution_space,
+                             std::integral_constant<unsigned int, 2>,
+                             typename view_type::execution_space>,
+          std::string>;
+      alloc_prop arg_prop_copy(arg_prop);
+      static_cast<Kokkos::Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy)
+          .value                 = v.label();
+      using execution_space_type = typename alloc_prop::execution_space;
+      const execution_space_type& exec_space =
+          static_cast<
+              Kokkos::Impl::ViewCtorProp<void, execution_space_type> const&>(
+              arg_prop_copy)
+              .value;
+      Kokkos::deep_copy(exec_space, v, typename view_type::value_type{});
+    } else
+      Kokkos::deep_copy(v, typename view_type::value_type{});
+  }
+}
+
+// FIXME User-provided (custom) layouts are not required to have a comparison
+// operator. Hence, there is no way to check if the requested layout is actually
+// the same as the existing one.
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    !(std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                   Kokkos::LayoutLeft>::value ||
+      std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                   Kokkos::LayoutRight>::value ||
+      std::is_same<typename Kokkos::View<T, P...>::array_layout,
+                   Kokkos::LayoutStride>::value ||
+      is_layouttiled<typename Kokkos::View<T, P...>::array_layout>::value)>
+impl_realloc(Kokkos::View<T, P...>& v,
+             const typename Kokkos::View<T, P...>::array_layout& layout,
+             const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  using view_type        = Kokkos::View<T, P...>;
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(Kokkos::ViewTraits<T, P...>::is_managed,
+                "Can only realloc managed views");
+  static_assert(!alloc_prop_input::has_label,
+                "The view constructor arguments passed to Kokkos::realloc must "
+                "not include a label!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to Kokkos::realloc must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::has_memory_space,
+                "The view constructor arguments passed to Kokkos::realloc must "
+                "not include a memory space instance!");
+
+  using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
+  alloc_prop arg_prop_copy(arg_prop);
+  static_cast<Kokkos::Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy)
+      .value = v.label();  // capture the label before deallocating
+
+  v = view_type();  // Deallocate first, if this is the only view to the
+                    // allocation
+  v = view_type(arg_prop_copy, layout);
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline void realloc(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    Kokkos::View<T, P...>& v,
+    const typename Kokkos::View<T, P...>::array_layout& layout) {
+  impl_realloc(v, layout, arg_prop);
+}
+
+template <class I, class T, class... P>
+inline std::enable_if_t<Impl::is_view_ctor_property<I>::value> realloc(
+    const I& arg_prop, Kokkos::View<T, P...>& v,
+    const typename Kokkos::View<T, P...>::array_layout& layout) {
+  impl_realloc(v, layout, Kokkos::view_alloc(arg_prop));
+}
+
+template <class T, class... P>
+inline void realloc(
+    Kokkos::View<T, P...>& v,
+    const typename Kokkos::View<T, P...>::array_layout& layout) {
+  impl_realloc(v, layout, Impl::ViewCtorProp<>{});
+}
+
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+// Deduce Mirror Types
+template <class Space, class T, class... P>
+struct MirrorViewType {
+  // The incoming view_type
+  using src_view_type = typename Kokkos::View<T, P...>;
+  // The memory space for the mirror view
+  using memory_space = typename Space::memory_space;
+  // Check whether it is the same memory space
+  enum {
+    is_same_memspace =
+        std::is_same<memory_space, typename src_view_type::memory_space>::value
+  };
+  // The array_layout
+  using array_layout = typename src_view_type::array_layout;
+  // The data type (non-const, since otherwise we could not even deep_copy
+  // to it)
+  using data_type = typename src_view_type::non_const_data_type;
+  // The destination view type if it is not the same memory space
+  using dest_view_type = Kokkos::View<data_type, array_layout, Space>;
+  // If it is the same memory_space, return the existing view_type.
+  // This also keeps the unmanaged trait if necessary.
+  using view_type =
+      std::conditional_t<is_same_memspace, src_view_type, dest_view_type>;
+};
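+
+// [Editor's note] Illustrative example, not part of the bundled sources: on a
+// CUDA build, MirrorViewType<Kokkos::HostSpace, double*,
+// Kokkos::CudaSpace>::view_type deduces a new host view type, while
+// MirrorViewType<Kokkos::CudaSpace, double*, Kokkos::CudaSpace>::view_type is
+// the source view type itself (preserving, e.g., an unmanaged trait).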
+
+template <class Space, class T, class... P>
+struct MirrorType {
+  // The incoming view_type
+  using src_view_type = typename Kokkos::View<T, P...>;
+  // The memory space for the mirror view
+  using memory_space = typename Space::memory_space;
+  // Check whether it is the same memory space
+  enum {
+    is_same_memspace =
+        std::is_same<memory_space, typename src_view_type::memory_space>::value
+  };
+  // The array_layout
+  using array_layout = typename src_view_type::array_layout;
+  // The data type (non-const, since otherwise we could not even deep_copy
+  // to it)
+  using data_type = typename src_view_type::non_const_data_type;
+  // The destination view type (for MirrorType this is always a newly deduced
+  // type)
+  using view_type = Kokkos::View<data_type, array_layout, Space>;
+};
+
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    !std::is_same<typename Kokkos::ViewTraits<T, P...>::array_layout,
+                  Kokkos::LayoutStride>::value &&
+        !Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space,
+    typename Kokkos::View<T, P...>::HostMirror>
+create_mirror(const Kokkos::View<T, P...>& src,
+              const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  using src_type         = View<T, P...>;
+  using dst_type         = typename src_type::HostMirror;
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(
+      !alloc_prop_input::has_label,
+      "The view constructor arguments passed to Kokkos::create_mirror "
+      "must not include a label!");
+  static_assert(
+      !alloc_prop_input::has_pointer,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not include a pointer!");
+  static_assert(
+      !alloc_prop_input::allow_padding,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not explicitly allow padding!");
+
+  using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
+  alloc_prop prop_copy(arg_prop);
+  static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
+      std::string(src.label()).append("_mirror");
+
+  return dst_type(
+      prop_copy,
+      src.rank_dynamic > 0 ? src.extent(0) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      src.rank_dynamic > 1 ? src.extent(1) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      src.rank_dynamic > 2 ? src.extent(2) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      src.rank_dynamic > 3 ? src.extent(3) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      src.rank_dynamic > 4 ? src.extent(4) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      src.rank_dynamic > 5 ? src.extent(5) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      src.rank_dynamic > 6 ? src.extent(6) : KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      src.rank_dynamic > 7 ? src.extent(7) : KOKKOS_IMPL_CTOR_DEFAULT_ARG);
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    std::is_same<typename Kokkos::ViewTraits<T, P...>::array_layout,
+                 Kokkos::LayoutStride>::value &&
+        !Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space,
+    typename Kokkos::View<T, P...>::HostMirror>
+create_mirror(const Kokkos::View<T, P...>& src,
+              const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  using src_type         = View<T, P...>;
+  using dst_type         = typename src_type::HostMirror;
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(
+      !alloc_prop_input::has_label,
+      "The view constructor arguments passed to Kokkos::create_mirror "
+      "must not include a label!");
+  static_assert(
+      !alloc_prop_input::has_pointer,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not include a pointer!");
+  static_assert(
+      !alloc_prop_input::allow_padding,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not explicitly allow padding!");
+
+  Kokkos::LayoutStride layout;
+
+  layout.dimension[0] = src.extent(0);
+  layout.dimension[1] = src.extent(1);
+  layout.dimension[2] = src.extent(2);
+  layout.dimension[3] = src.extent(3);
+  layout.dimension[4] = src.extent(4);
+  layout.dimension[5] = src.extent(5);
+  layout.dimension[6] = src.extent(6);
+  layout.dimension[7] = src.extent(7);
+
+  layout.stride[0] = src.stride_0();
+  layout.stride[1] = src.stride_1();
+  layout.stride[2] = src.stride_2();
+  layout.stride[3] = src.stride_3();
+  layout.stride[4] = src.stride_4();
+  layout.stride[5] = src.stride_5();
+  layout.stride[6] = src.stride_6();
+  layout.stride[7] = src.stride_7();
+
+  using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
+  alloc_prop prop_copy(arg_prop);
+  static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
+      std::string(src.label()).append("_mirror");
+
+  return dst_type(prop_copy, layout);
+}
+
+// Create a mirror in a new space (specialization for different space)
+template <class T, class... P, class... ViewCtorArgs,
+          class Enable = std::enable_if_t<
+              Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>>
+auto create_mirror(const Kokkos::View<T, P...>& src,
+                   const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+
+  static_assert(
+      !alloc_prop_input::has_label,
+      "The view constructor arguments passed to Kokkos::create_mirror "
+      "must not include a label!");
+  static_assert(
+      !alloc_prop_input::has_pointer,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not include a pointer!");
+  static_assert(
+      !alloc_prop_input::allow_padding,
+      "The view constructor arguments passed to Kokkos::create_mirror must "
+      "not explicitly allow padding!");
+
+  using alloc_prop = Impl::ViewCtorProp<ViewCtorArgs..., std::string>;
+  alloc_prop prop_copy(arg_prop);
+  static_cast<Impl::ViewCtorProp<void, std::string>&>(prop_copy).value =
+      std::string(src.label()).append("_mirror");
+
+  return typename Impl::MirrorType<typename alloc_prop::memory_space, T,
+                                   P...>::view_type(prop_copy, src.layout());
+}
+}  // namespace Impl
+
+template <class T, class... P>
+std::enable_if_t<std::is_void<typename ViewTraits<T, P...>::specialize>::value,
+                 typename Kokkos::View<T, P...>::HostMirror>
+create_mirror(Kokkos::View<T, P...> const& v) {
+  return Impl::create_mirror(v, Impl::ViewCtorProp<>{});
+}
+
+template <class T, class... P>
+std::enable_if_t<std::is_void<typename ViewTraits<T, P...>::specialize>::value,
+                 typename Kokkos::View<T, P...>::HostMirror>
+create_mirror(Kokkos::Impl::WithoutInitializing_t wi,
+              Kokkos::View<T, P...> const& v) {
+  return Impl::create_mirror(v, view_alloc(wi));
+}
+
+template <class Space, class T, class... P,
+          typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+std::enable_if_t<std::is_void<typename ViewTraits<T, P...>::specialize>::value,
+                 typename Impl::MirrorType<Space, T, P...>::view_type>
+create_mirror(Space const&, Kokkos::View<T, P...> const& v) {
+  return Impl::create_mirror(v, view_alloc(typename Space::memory_space{}));
+}
+
+template <class T, class... P, class... ViewCtorArgs,
+          typename Enable = std::enable_if_t<
+              std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
+              Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space>>
+auto create_mirror(Impl::ViewCtorProp<ViewCtorArgs...> const& arg_prop,
+                   Kokkos::View<T, P...> const& v) {
+  return Impl::create_mirror(v, arg_prop);
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+std::enable_if_t<
+    std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
+        !Impl::ViewCtorProp<ViewCtorArgs...>::has_memory_space,
+    typename Kokkos::View<T, P...>::HostMirror>
+create_mirror(Impl::ViewCtorProp<ViewCtorArgs...> const& arg_prop,
+              Kokkos::View<T, P...> const& v) {
+  return Impl::create_mirror(v, arg_prop);
+}
+
+template <class Space, class T, class... P,
+          typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+std::enable_if_t<std::is_void<typename ViewTraits<T, P...>::specialize>::value,
+                 typename Impl::MirrorType<Space, T, P...>::view_type>
+create_mirror(Kokkos::Impl::WithoutInitializing_t wi, Space const&,
+              Kokkos::View<T, P...> const& v) {
+  return Impl::create_mirror(v, view_alloc(typename Space::memory_space{}, wi));
+}
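+
+// [Editor's note] Illustrative usage sketch, not part of the bundled sources.
+// create_mirror always allocates a new host-accessible view:
+//
+//   Kokkos::View<double*> d("d", 100);
+//   auto m  = Kokkos::create_mirror(d);                               // initialized
+//   auto mw = Kokkos::create_mirror(Kokkos::WithoutInitializing, d);  // raw storage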
+
+namespace Impl {
+
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    (std::is_same<
+         typename Kokkos::View<T, P...>::memory_space,
+         typename Kokkos::View<T, P...>::HostMirror::memory_space>::value &&
+     std::is_same<
+         typename Kokkos::View<T, P...>::data_type,
+         typename Kokkos::View<T, P...>::HostMirror::data_type>::value),
+    typename Kokkos::View<T, P...>::HostMirror>
+create_mirror_view(const Kokkos::View<T, P...>& src,
+                   const Impl::ViewCtorProp<ViewCtorArgs...>&) {
+  return src;
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+inline std::enable_if_t<
+    !(std::is_same<
+          typename Kokkos::View<T, P...>::memory_space,
+          typename Kokkos::View<T, P...>::HostMirror::memory_space>::value &&
+      std::is_same<
+          typename Kokkos::View<T, P...>::data_type,
+          typename Kokkos::View<T, P...>::HostMirror::data_type>::value),
+    typename Kokkos::View<T, P...>::HostMirror>
+create_mirror_view(const Kokkos::View<T, P...>& src,
+                   const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  return Kokkos::Impl::create_mirror(src, arg_prop);
+}
+
+// Create a mirror view in a new space (specialization for same space)
+template <class Space, class T, class... P, class... ViewCtorArgs>
+std::enable_if_t<Impl::MirrorViewType<Space, T, P...>::is_same_memspace,
+                 typename Impl::MirrorViewType<Space, T, P...>::view_type>
+create_mirror_view(const Space&, const Kokkos::View<T, P...>& src,
+                   const Impl::ViewCtorProp<ViewCtorArgs...>&) {
+  return src;
+}
+
+// Create a mirror view in a new space (specialization for different space)
+template <class Space, class T, class... P, class... ViewCtorArgs>
+std::enable_if_t<!Impl::MirrorViewType<Space, T, P...>::is_same_memspace,
+                 typename Impl::MirrorViewType<Space, T, P...>::view_type>
+create_mirror_view(const Space&, const Kokkos::View<T, P...>& src,
+                   const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop) {
+  using MemorySpace = typename Space::memory_space;
+  using alloc_prop  = Impl::ViewCtorProp<ViewCtorArgs..., MemorySpace>;
+  alloc_prop prop_copy(arg_prop);
+
+  return Kokkos::Impl::create_mirror(src, prop_copy);
+}
+}  // namespace Impl
+
+template <class T, class... P>
+std::enable_if_t<
+    std::is_same<
+        typename Kokkos::View<T, P...>::memory_space,
+        typename Kokkos::View<T, P...>::HostMirror::memory_space>::value &&
+        std::is_same<
+            typename Kokkos::View<T, P...>::data_type,
+            typename Kokkos::View<T, P...>::HostMirror::data_type>::value,
+    typename Kokkos::View<T, P...>::HostMirror>
+create_mirror_view(const Kokkos::View<T, P...>& src) {
+  return src;
+}
+
+template <class T, class... P>
+std::enable_if_t<
+    !(std::is_same<
+          typename Kokkos::View<T, P...>::memory_space,
+          typename Kokkos::View<T, P...>::HostMirror::memory_space>::value &&
+      std::is_same<
+          typename Kokkos::View<T, P...>::data_type,
+          typename Kokkos::View<T, P...>::HostMirror::data_type>::value),
+    typename Kokkos::View<T, P...>::HostMirror>
+create_mirror_view(const Kokkos::View<T, P...>& src) {
+  return Kokkos::create_mirror(src);
+}
+
+template <class T, class... P>
+typename Kokkos::View<T, P...>::HostMirror create_mirror_view(
+    Kokkos::Impl::WithoutInitializing_t wi, Kokkos::View<T, P...> const& v) {
+  return Impl::create_mirror_view(v, view_alloc(wi));
+}
+
+// FIXME_C++17 Improve SFINAE here.
+template <class Space, class T, class... P,
+          class Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+typename Impl::MirrorViewType<Space, T, P...>::view_type create_mirror_view(
+    const Space&, const Kokkos::View<T, P...>& src,
+    std::enable_if_t<Impl::MirrorViewType<Space, T, P...>::is_same_memspace>* =
+        nullptr) {
+  return src;
+}
+
+// FIXME_C++17 Improve SFINAE here.
+template <class Space, class T, class... P,
+          class Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+typename Impl::MirrorViewType<Space, T, P...>::view_type create_mirror_view(
+    const Space& space, const Kokkos::View<T, P...>& src,
+    std::enable_if_t<!Impl::MirrorViewType<Space, T, P...>::is_same_memspace>* =
+        nullptr) {
+  return Kokkos::create_mirror(space, src);
+}
+
+template <class Space, class T, class... P,
+          typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+typename Impl::MirrorViewType<Space, T, P...>::view_type create_mirror_view(
+    Kokkos::Impl::WithoutInitializing_t wi, Space const& space,
+    Kokkos::View<T, P...> const& v) {
+  return Impl::create_mirror_view(space, v, view_alloc(wi));
+}
+
+template <class T, class... P, class... ViewCtorArgs>
+auto create_mirror_view(const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+                        const Kokkos::View<T, P...>& v) {
+  return Impl::create_mirror_view(v, arg_prop);
+}
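+
+// [Editor's note] Illustrative usage sketch, not part of the bundled sources.
+// Unlike create_mirror, create_mirror_view returns the source view itself
+// when it is already host-accessible with the right data type, making the
+// usual transfer pattern free on host-only builds:
+//
+//   auto h = Kokkos::create_mirror_view(d);  // may alias d
+//   Kokkos::deep_copy(h, d);                 // no-op if h aliases d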
+
+template <class... ViewCtorArgs, class T, class... P>
+auto create_mirror_view_and_copy(
+    const Impl::ViewCtorProp<ViewCtorArgs...>&,
+    const Kokkos::View<T, P...>& src,
+    std::enable_if_t<
+        std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
+        Impl::MirrorViewType<
+            typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+            P...>::is_same_memspace>* = nullptr) {
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+  static_assert(
+      alloc_prop_input::has_memory_space,
+      "The view constructor arguments passed to "
+      "Kokkos::create_mirror_view_and_copy must include a memory space!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to "
+                "Kokkos::create_mirror_view_and_copy must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::allow_padding,
+                "The view constructor arguments passed to "
+                "Kokkos::create_mirror_view_and_copy must "
+                "not explicitly allow padding!");
+
+  // same behavior as deep_copy(src, src)
+  if (!alloc_prop_input::has_execution_space)
+    fence(
+        "Kokkos::create_mirror_view_and_copy: fence before returning src view");
+  return src;
+}
+
+template <class... ViewCtorArgs, class T, class... P>
+auto create_mirror_view_and_copy(
+    const Impl::ViewCtorProp<ViewCtorArgs...>& arg_prop,
+    const Kokkos::View<T, P...>& src,
+    std::enable_if_t<
+        std::is_void<typename ViewTraits<T, P...>::specialize>::value &&
+        !Impl::MirrorViewType<
+            typename Impl::ViewCtorProp<ViewCtorArgs...>::memory_space, T,
+            P...>::is_same_memspace>* = nullptr) {
+  using alloc_prop_input = Impl::ViewCtorProp<ViewCtorArgs...>;
+  static_assert(
+      alloc_prop_input::has_memory_space,
+      "The view constructor arguments passed to "
+      "Kokkos::create_mirror_view_and_copy must include a memory space!");
+  static_assert(!alloc_prop_input::has_pointer,
+                "The view constructor arguments passed to "
+                "Kokkos::create_mirror_view_and_copy must "
+                "not include a pointer!");
+  static_assert(!alloc_prop_input::allow_padding,
+                "The view constructor arguments passed to "
+                "Kokkos::create_mirror_view_and_copy must "
+                "not explicitly allow padding!");
+  using Space  = typename alloc_prop_input::memory_space;
+  using Mirror = typename Impl::MirrorViewType<Space, T, P...>::view_type;
+
+  // Add some properties if not provided to avoid need for if constexpr
+  using alloc_prop = Impl::ViewCtorProp<
+      ViewCtorArgs...,
+      std::conditional_t<alloc_prop_input::has_label,
+                         std::integral_constant<unsigned int, 12>, std::string>,
+      std::conditional_t<!alloc_prop_input::initialize,
+                         std::integral_constant<unsigned int, 13>,
+                         Impl::WithoutInitializing_t>,
+      std::conditional_t<alloc_prop_input::has_execution_space,
+                         std::integral_constant<unsigned int, 14>,
+                         typename Space::execution_space>>;
+  alloc_prop arg_prop_copy(arg_prop);
+
+  std::string& label =
+      static_cast<Impl::ViewCtorProp<void, std::string>&>(arg_prop_copy).value;
+  if (label.empty()) label = src.label();
+  auto mirror = typename Mirror::non_const_type{arg_prop_copy, src.layout()};
+  if (alloc_prop_input::has_execution_space) {
+    using ExecutionSpace = typename alloc_prop::execution_space;
+    deep_copy(
+        static_cast<Impl::ViewCtorProp<void, ExecutionSpace>&>(arg_prop_copy)
+            .value,
+        mirror, src);
+  } else
+    deep_copy(mirror, src);
+  return mirror;
+}
+
+// Previously, when auto was used as the return type here, the Intel 19.3
+// compiler would sometimes fail to emit a symbol, presumably because of the
+// combination of auto and simply forwarding the arguments (see issue #5196).
+template <class Space, class T, class... P,
+          typename Enable = std::enable_if_t<Kokkos::is_space<Space>::value>>
+typename Impl::MirrorViewType<Space, T, P...>::view_type
+create_mirror_view_and_copy(
+    const Space&, const Kokkos::View<T, P...>& src,
+    std::string const& name = "",
+    std::enable_if_t<
+        std::is_void<typename ViewTraits<T, P...>::specialize>::value>* =
+        nullptr) {
+  return create_mirror_view_and_copy(
+      Kokkos::view_alloc(typename Space::memory_space{}, name), src);
+}
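+
+// [Editor's note] Illustrative usage sketch, not part of the bundled sources:
+//
+//   auto dev = Kokkos::create_mirror_view_and_copy(
+//       Kokkos::DefaultExecutionSpace::memory_space{}, h, "dev_copy");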
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+// Create a mirror view in a new space without initializing (specialization for
+// same space)
+template <class Space, class T, class... P>
+KOKKOS_DEPRECATED_WITH_COMMENT(
+    "Use the version taking WithoutInitializing as first argument")
+typename Impl::MirrorViewType<Space, T, P...>::view_type create_mirror_view(
+    const Space&, const Kokkos::View<T, P...>& src,
+    Kokkos::Impl::WithoutInitializing_t,
+    std::enable_if_t<Impl::MirrorViewType<Space, T, P...>::is_same_memspace>* =
+        nullptr) {
+  return src;
+}
+
+// Create a mirror view in a new space without initializing (specialization for
+// different space)
+template <class Space, class T, class... P>
+KOKKOS_DEPRECATED_WITH_COMMENT(
+    "Use the version taking WithoutInitializing as first argument")
+typename Impl::MirrorViewType<Space, T, P...>::view_type create_mirror_view(
+    const Space&, const Kokkos::View<T, P...>& src,
+    Kokkos::Impl::WithoutInitializing_t,
+    std::enable_if_t<!Impl::MirrorViewType<Space, T, P...>::is_same_memspace>* =
+        nullptr) {
+  using Mirror = typename Impl::MirrorViewType<Space, T, P...>::view_type;
+  return Mirror(view_alloc(WithoutInitializing, src.label()), src.layout());
+}
+#endif
+
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Core.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Core.hpp
new file mode 100644 (file)
index 0000000..3a9aaaf
--- /dev/null
+++ b/bundled/kokkos-3.7.00/core/src/Kokkos_Core.hpp
@@ -0,0 +1,405 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CORE_HPP
+#define KOKKOS_CORE_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_CORE
+#endif
+
+//----------------------------------------------------------------------------
+// Include the execution space header files for the enabled execution spaces.
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <KokkosCore_Config_DeclareBackend.hpp>
+
+#include <Kokkos_Half.hpp>
+#include <Kokkos_AnonymousSpace.hpp>
+#include <Kokkos_LogicalSpaces.hpp>
+#include <Kokkos_Pair.hpp>
+#include <Kokkos_MinMaxClamp.hpp>
+#include <Kokkos_MathematicalConstants.hpp>
+#include <Kokkos_MathematicalFunctions.hpp>
+#include <Kokkos_MathematicalSpecialFunctions.hpp>
+#include <Kokkos_MemoryPool.hpp>
+#include <Kokkos_Array.hpp>
+#include <Kokkos_View.hpp>
+#include <Kokkos_Vectorization.hpp>
+#include <Kokkos_Atomic.hpp>
+#include <Kokkos_hwloc.hpp>
+#include <Kokkos_Timer.hpp>
+#include <Kokkos_Tuners.hpp>
+#include <Kokkos_TaskScheduler.hpp>
+#include <Kokkos_Complex.hpp>
+#include <Kokkos_CopyViews.hpp>
+#include <impl/Kokkos_InitializationSettings.hpp>
+#include <functional>
+#include <iosfwd>
+#include <memory>
+#include <vector>
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+void initialize(int& argc, char* argv[]);
+
+void initialize(
+    InitializationSettings const& settings = InitializationSettings());
+
+namespace Impl {
+
+void pre_initialize(const InitializationSettings& settings);
+
+void post_initialize(const InitializationSettings& settings);
+
+void declare_configuration_metadata(const std::string& category,
+                                    const std::string& key,
+                                    const std::string& value);
+
+}  // namespace Impl
+
+KOKKOS_ATTRIBUTE_NODISCARD bool is_initialized() noexcept;
+KOKKOS_ATTRIBUTE_NODISCARD bool is_finalized() noexcept;
+
+bool show_warnings() noexcept;
+bool tune_internals() noexcept;
+
+/** \brief  Finalize the spaces that were initialized via Kokkos::initialize */
+void finalize();
+
+/**
+ * \brief Push a user-defined function to be called in
+ *   Kokkos::finalize, before any Kokkos state is finalized.
+ *
+ * \warning Only call this after Kokkos::initialize, but before
+ *   Kokkos::finalize.
+ *
+ * This function is the Kokkos analog to std::atexit.  If you call
+ * this with a function f, then your function will get called when
+ * Kokkos::finalize is called.  Specifically, it will be called BEFORE
+ * Kokkos does any finalization.  This means that all execution
+ * spaces, memory spaces, etc. that were initialized will still be
+ * initialized when your function is called.
+ *
+ * Just like std::atexit, if you call push_finalize_hook in sequence
+ * with multiple functions (f, g, h), Kokkos::finalize will call them
+ * in reverse order (h, g, f), as if popping a stack.  Furthermore,
+ * just like std::atexit, if any of your functions throws but does not
+ * catch an exception, Kokkos::finalize will call std::terminate.
+ */
+void push_finalize_hook(std::function<void()> f);
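+
+// [Editor's note] Illustrative usage sketch, not part of the bundled sources:
+//
+//   Kokkos::initialize();
+//   Kokkos::push_finalize_hook([] { std::printf("before finalize\n"); });
+//   Kokkos::finalize();  // runs the hook, then tears down all backends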
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+/** \brief  Finalize all known execution spaces */
+KOKKOS_DEPRECATED void finalize_all();
+#endif
+
+/** \brief Print "Bill of Materials" */
+void print_configuration(std::ostream& os, bool verbose = false);
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+/* Allocate memory from a memory space.
+ * The allocation is tracked in Kokkos' memory tracking system, so
+ * leaked memory can be identified.
+ */
+template <class Space = Kokkos::DefaultExecutionSpace::memory_space>
+inline void* kokkos_malloc(const std::string& arg_alloc_label,
+                           const size_t arg_alloc_size) {
+  using MemorySpace = typename Space::memory_space;
+  return Impl::SharedAllocationRecord<MemorySpace>::allocate_tracked(
+      MemorySpace(), arg_alloc_label, arg_alloc_size);
+}
+
+template <class Space = Kokkos::DefaultExecutionSpace::memory_space>
+inline void* kokkos_malloc(const size_t arg_alloc_size) {
+  using MemorySpace = typename Space::memory_space;
+  return Impl::SharedAllocationRecord<MemorySpace>::allocate_tracked(
+      MemorySpace(), "no-label", arg_alloc_size);
+}
+
+template <class Space = Kokkos::DefaultExecutionSpace::memory_space>
+inline void kokkos_free(void* arg_alloc) {
+  using MemorySpace = typename Space::memory_space;
+  return Impl::SharedAllocationRecord<MemorySpace>::deallocate_tracked(
+      arg_alloc);
+}
+
+template <class Space = Kokkos::DefaultExecutionSpace::memory_space>
+inline void* kokkos_realloc(void* arg_alloc, const size_t arg_alloc_size) {
+  using MemorySpace = typename Space::memory_space;
+  return Impl::SharedAllocationRecord<MemorySpace>::reallocate_tracked(
+      arg_alloc, arg_alloc_size);
+}
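+
+// [Editor's note] Illustrative usage sketch, not part of the bundled sources:
+//
+//   void* p = Kokkos::kokkos_malloc<Kokkos::HostSpace>("buffer", 1024);
+//   p       = Kokkos::kokkos_realloc<Kokkos::HostSpace>(p, 4096);
+//   Kokkos::kokkos_free<Kokkos::HostSpace>(p);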
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+/** \brief  ScopeGuard
+ *  Some user scope issues have been identified with some Kokkos::finalize
+ * calls; ScopeGuard aims to correct these issues.
+ *
+ *  Two requirements for ScopeGuard:
+ *     1. If Kokkos::is_initialized() when the constructor runs, do not call
+ *        Kokkos::initialize or Kokkos::finalize.
+ *     2. It is not copyable or assignable.
+ */
+namespace Impl {
+
+inline std::string scopeguard_correct_usage() {
+  return std::string(
+      "Do instead:\n"
+      "  std::unique_ptr<Kokkos::ScopeGuard> guard =\n"
+      "    !Kokkos::is_initialized() && !Kokkos::is_finalized()?\n"
+      "    new ScopeGuard(argc,argv) : nullptr;\n");
+}
+
+inline std::string scopeguard_create_while_initialized_warning() {
+  return std::string(
+             "Kokkos Error: Creating a ScopeGuard while Kokkos is initialized "
+             "is illegal.\n")
+      .append(scopeguard_correct_usage());
+}
+
+inline std::string scopeguard_create_after_finalize_warning() {
+  return std::string(
+             "Kokkos Error: Creating a ScopeGuard after Kokkos was finalized "
+             "is illegal.\n")
+      .append(scopeguard_correct_usage());
+}
+
+inline std::string scopeguard_destruct_after_finalize_warning() {
+  return std::string(
+             "Kokkos Error: Destroying a ScopeGuard after Kokkos was finalized "
+             "is illegal.\n")
+      .append(scopeguard_correct_usage());
+}
+
+}  // namespace Impl
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+class KOKKOS_ATTRIBUTE_NODISCARD ScopeGuard {
+ public:
+#if defined(__has_cpp_attribute) && __has_cpp_attribute(nodiscard) >= 201907
+  KOKKOS_ATTRIBUTE_NODISCARD
+#endif
+  ScopeGuard(int& argc, char* argv[]) {
+    sg_init = false;
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+    if (is_initialized()) {
+      std::cerr << Impl::scopeguard_create_while_initialized_warning()
+                << std::endl;
+    }
+    if (is_finalized()) {
+      std::cerr << Impl::scopeguard_create_after_finalize_warning()
+                << std::endl;
+    }
+#endif
+    if (!is_initialized()) {
+      initialize(argc, argv);
+      sg_init = true;
+    }
+  }
+
+#if defined(__has_cpp_attribute) && __has_cpp_attribute(nodiscard) >= 201907
+  KOKKOS_ATTRIBUTE_NODISCARD
+#endif
+  explicit ScopeGuard(
+      const InitializationSettings& settings = InitializationSettings()) {
+    sg_init = false;
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+    if (is_initialized()) {
+      std::cerr << Impl::scopeguard_create_while_initialized_warning()
+                << std::endl;
+    }
+    if (is_finalized()) {
+      std::cerr << Impl::scopeguard_create_after_finalize_warning()
+                << std::endl;
+    }
+#endif
+    if (!is_initialized()) {
+      initialize(settings);
+      sg_init = true;
+    }
+  }
+
+  ~ScopeGuard() {
+#ifdef KOKKOS_ENABLE_DEPRECATION_WARNINGS
+    if (is_finalized()) {
+      std::cerr << Impl::scopeguard_destruct_after_finalize_warning()
+                << std::endl;
+    }
+#endif
+    if (is_initialized() && sg_init) {
+      finalize();
+    }
+  }
+
+ private:
+  bool sg_init;
+
+ public:
+  ScopeGuard& operator=(const ScopeGuard&) = delete;
+  ScopeGuard& operator=(ScopeGuard&&) = delete;
+  ScopeGuard(const ScopeGuard&)       = delete;
+  ScopeGuard(ScopeGuard&&)            = delete;
+};
+
+#else  // ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+
+class KOKKOS_ATTRIBUTE_NODISCARD ScopeGuard {
+ public:
+#if defined(__has_cpp_attribute) && __has_cpp_attribute(nodiscard) >= 201907
+  KOKKOS_ATTRIBUTE_NODISCARD
+#endif
+  ScopeGuard(int& argc, char* argv[]) {
+    if (is_initialized()) {
+      Kokkos::abort(
+          Impl::scopeguard_create_while_initialized_warning().c_str());
+    }
+    if (is_finalized()) {
+      Kokkos::abort(Impl::scopeguard_create_after_finalize_warning().c_str());
+    }
+    initialize(argc, argv);
+  }
+
+#if defined(__has_cpp_attribute) && __has_cpp_attribute(nodiscard) >= 201907
+  KOKKOS_ATTRIBUTE_NODISCARD
+#endif
+  ScopeGuard(
+      const InitializationSettings& settings = InitializationSettings()) {
+    if (is_initialized()) {
+      Kokkos::abort(
+          Impl::scopeguard_create_while_initialized_warning().c_str());
+    }
+    if (is_finalized()) {
+      Kokkos::abort(Impl::scopeguard_create_after_finalize_warning().c_str());
+    }
+    initialize(settings);
+  }
+
+  ~ScopeGuard() {
+    if (is_finalized()) {
+      Kokkos::abort(Impl::scopeguard_destruct_after_finalize_warning().c_str());
+    }
+    finalize();
+  }
+
+  ScopeGuard& operator=(const ScopeGuard&) = delete;
+  ScopeGuard& operator=(ScopeGuard&&) = delete;
+  ScopeGuard(const ScopeGuard&)       = delete;
+  ScopeGuard(ScopeGuard&&)            = delete;
+};
+#endif
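+
+// A minimal usage sketch (illustrative only): ScopeGuard ties initialize and
+// finalize to a scope, even when the scope is left early via an exception.
+//
+//   int main(int argc, char* argv[]) {
+//     Kokkos::ScopeGuard guard(argc, argv);
+//     Kokkos::parallel_for(16, KOKKOS_LAMBDA(int i) { /* ... */ });
+//   }  // guard's destructor calls Kokkos::finalize() here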
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Experimental {
+// Partitioning an Execution Space: expects a space and integer arguments
+// giving relative weights.
+//   Customization point for backends.
+//   Default behavior is to return copies of the passed-in instance.
+template <class ExecSpace, class... Args>
+std::vector<ExecSpace> partition_space(ExecSpace space, Args...) {
+  static_assert(is_execution_space<ExecSpace>::value,
+                "Kokkos Error: partition_space expects an Execution Space as "
+                "first argument");
+#ifdef __cpp_fold_expressions
+  static_assert(
+      (... && std::is_arithmetic_v<Args>),
+      "Kokkos Error: partitioning arguments must be integers or floats");
+#endif
+  std::vector<ExecSpace> instances(sizeof...(Args));
+  for (int s = 0; s < int(sizeof...(Args)); s++) instances[s] = space;
+  return instances;
+}
+
+template <class ExecSpace, class T>
+std::vector<ExecSpace> partition_space(ExecSpace space,
+                                       std::vector<T>& weights) {
+  static_assert(is_execution_space<ExecSpace>::value,
+                "Kokkos Error: partition_space expects an Execution Space as "
+                "first argument");
+  static_assert(
+      std::is_arithmetic<T>::value,
+      "Kokkos Error: partitioning arguments must be integers or floats");
+
+  std::vector<ExecSpace> instances(weights.size());
+  for (int s = 0; s < int(weights.size()); s++) instances[s] = space;
+  return instances;
+}
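+
+// A minimal usage sketch (illustrative only): split the default execution
+// space into two instances with equal relative weights.
+//
+//   Kokkos::DefaultExecutionSpace exec;
+//   auto instances = Kokkos::Experimental::partition_space(exec, 1, 1);
+//   // each element may be used as a separate execution space instance
+//   // (with the default implementation, each is a copy of `exec`).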
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#include <Kokkos_Crs.hpp>
+#include <Kokkos_WorkGraphPolicy.hpp>
+// Including this in Kokkos_Parallel_Reduce.hpp led to a circular dependency
+// because Kokkos::Sum is used in Kokkos_Combined_Reducer.hpp and is the
+// default reducer. The real answer is to finally break up
+// Kokkos_Parallel_Reduce.hpp into smaller parts...
+#include <impl/Kokkos_Combined_Reducer.hpp>
+// Yet another workaround to deal with circular dependency issues because the
+// implementation of the RAII wrapper is using Kokkos::single.
+#include <Kokkos_AcquireUniqueTokenImpl.hpp>
+
+// Specializations required after core definitions
+#include <KokkosCore_Config_PostInclude.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_CORE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_CORE
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Core_fwd.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Core_fwd.hpp
new file mode 100644 (file)
index 0000000..2bb323b
--- /dev/null
@@ -0,0 +1,439 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CORE_FWD_HPP
+#define KOKKOS_CORE_FWD_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_CORE_FWD
+#endif
+
+//----------------------------------------------------------------------------
+// Kokkos_Macros.hpp does introspection on configuration options
+// and compiler environment then sets a collection of #define macros.
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_Utilities.hpp>
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+#include <Kokkos_MasterLock.hpp>
+#endif
+
+//----------------------------------------------------------------------------
+// A 64-bit build (8-byte pointers) is assumed throughout the code base.
+
+static_assert(sizeof(void *) == 8,
+              "Kokkos assumes 64-bit build; i.e., 8-byte pointers");
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+struct AUTO_t {
+  KOKKOS_INLINE_FUNCTION
+  constexpr const AUTO_t &operator()() const { return *this; }
+};
+
+namespace {
+/**\brief Token to indicate that a parameter's value is to be automatically
+ * selected */
+constexpr AUTO_t AUTO = Kokkos::AUTO_t();
+}  // namespace
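+
+// A typical use of AUTO (illustrative only; `league_size` is a placeholder):
+// let Kokkos select the team size for a TeamPolicy.
+//
+//   Kokkos::TeamPolicy<> policy(league_size, Kokkos::AUTO);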
+
+struct InvalidType {};
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+// Forward declarations for class interrelationships
+
+namespace Kokkos {
+
+class HostSpace;  ///< Memory space for main process and CPU execution spaces
+class AnonymousSpace;
+
+template <class ExecutionSpace, class MemorySpace>
+struct Device;
+
+// forward declare here so that backend initializer calls can use it.
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+struct InitArguments;
+#endif
+class InitializationSettings;
+
+}  // namespace Kokkos
+
+// Include backend forward declarations as determined by build options
+#include <KokkosCore_Config_FwdBackend.hpp>
+
+//----------------------------------------------------------------------------
+// Set the default execution space.
+
+/// Define Kokkos::DefaultExecutionSpace as per configuration option
+/// or chosen from the enabled execution spaces in the following order:
+/// Kokkos::Cuda, Kokkos::Experimental::OpenMPTarget, Kokkos::OpenMP,
+/// Kokkos::Threads, Kokkos::Serial
+
+#if defined(__clang_analyzer__)
+#define KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION \
+  [[clang::annotate("DefaultExecutionSpace")]]
+#define KOKKOS_IMPL_DEFAULT_HOST_EXEC_SPACE_ANNOTATION \
+  [[clang::annotate("DefaultHostExecutionSpace")]]
+#else
+#define KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION
+#define KOKKOS_IMPL_DEFAULT_HOST_EXEC_SPACE_ANNOTATION
+#endif
+
+namespace Kokkos {
+
+#if defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_CUDA)
+using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION = Cuda;
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMPTARGET)
+using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION =
+    Experimental::OpenMPTarget;
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HIP)
+using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION =
+    Experimental::HIP;
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SYCL)
+using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION =
+    Experimental::SYCL;
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENACC)
+using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION =
+    Experimental::OpenACC;
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMP)
+using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION = OpenMP;
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_THREADS)
+using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION = Threads;
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HPX)
+using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION =
+    Kokkos::Experimental::HPX;
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SERIAL)
+using DefaultExecutionSpace KOKKOS_IMPL_DEFAULT_EXEC_SPACE_ANNOTATION = Serial;
+#else
+#error \
+    "At least one of the following execution spaces must be defined in order to use Kokkos: Kokkos::Cuda, Kokkos::Experimental::HIP, Kokkos::Experimental::SYCL, Kokkos::Experimental::OpenMPTarget, Kokkos::Experimental::OpenACC, Kokkos::OpenMP, Kokkos::Threads, Kokkos::Experimental::HPX, or Kokkos::Serial."
+#endif
+
+#if defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMP)
+using DefaultHostExecutionSpace KOKKOS_IMPL_DEFAULT_HOST_EXEC_SPACE_ANNOTATION =
+    OpenMP;
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_THREADS)
+using DefaultHostExecutionSpace KOKKOS_IMPL_DEFAULT_HOST_EXEC_SPACE_ANNOTATION =
+    Threads;
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HPX)
+using DefaultHostExecutionSpace KOKKOS_IMPL_DEFAULT_HOST_EXEC_SPACE_ANNOTATION =
+    Kokkos::Experimental::HPX;
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SERIAL)
+using DefaultHostExecutionSpace KOKKOS_IMPL_DEFAULT_HOST_EXEC_SPACE_ANNOTATION =
+    Serial;
+#elif defined(KOKKOS_ENABLE_OPENMP)
+using DefaultHostExecutionSpace KOKKOS_IMPL_DEFAULT_HOST_EXEC_SPACE_ANNOTATION =
+    OpenMP;
+#elif defined(KOKKOS_ENABLE_THREADS)
+using DefaultHostExecutionSpace KOKKOS_IMPL_DEFAULT_HOST_EXEC_SPACE_ANNOTATION =
+    Threads;
+#elif defined(KOKKOS_ENABLE_HPX)
+using DefaultHostExecutionSpace KOKKOS_IMPL_DEFAULT_HOST_EXEC_SPACE_ANNOTATION =
+    Kokkos::Experimental::HPX;
+#elif defined(KOKKOS_ENABLE_SERIAL)
+using DefaultHostExecutionSpace KOKKOS_IMPL_DEFAULT_HOST_EXEC_SPACE_ANNOTATION =
+    Serial;
+#else
+#error \
+    "At least one of the following execution spaces must be defined in order to use Kokkos: Kokkos::OpenMP, Kokkos::Threads, Kokkos::Experimental::HPX, or Kokkos::Serial."
+#endif
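+
+// One way to inspect the selected defaults (illustrative only; assumes
+// <iostream> is included):
+//
+//   std::cout << Kokkos::DefaultExecutionSpace::name() << '\n';
+//   std::cout << Kokkos::DefaultHostExecutionSpace::name() << '\n';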
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+// Detect the active execution space and define its memory space.
+// This is used to verify whether a running kernel can access
+// a given memory space.
+
+namespace Kokkos {
+
+template <class AccessSpace, class MemorySpace>
+struct SpaceAccessibility;
+
+namespace Impl {
+
+// primary template: memory space is accessible, do nothing.
+template <class MemorySpace, class AccessSpace,
+          bool = SpaceAccessibility<AccessSpace, MemorySpace>::accessible>
+struct RuntimeCheckMemoryAccessViolation {
+  KOKKOS_FUNCTION RuntimeCheckMemoryAccessViolation(char const *const) {}
+};
+
+// explicit specialization: memory access violation will occur, call abort with
+// the specified error message.
+template <class MemorySpace, class AccessSpace>
+struct RuntimeCheckMemoryAccessViolation<MemorySpace, AccessSpace, false> {
+  KOKKOS_FUNCTION RuntimeCheckMemoryAccessViolation(char const *const msg) {
+    Kokkos::abort(msg);
+  }
+};
+
+// calls abort with default error message at runtime if memory access violation
+// will occur
+template <class MemorySpace>
+KOKKOS_FUNCTION void runtime_check_memory_access_violation() {
+  KOKKOS_IF_ON_HOST((
+      RuntimeCheckMemoryAccessViolation<MemorySpace, DefaultHostExecutionSpace>(
+          "ERROR: attempt to access inaccessible memory space");))
+  KOKKOS_IF_ON_DEVICE(
+      (RuntimeCheckMemoryAccessViolation<MemorySpace, DefaultExecutionSpace>(
+           "ERROR: attempt to access inaccessible memory space");))
+}
+
+// calls abort with specified error message at runtime if memory access
+// violation will occur
+template <class MemorySpace>
+KOKKOS_FUNCTION void runtime_check_memory_access_violation(
+    char const *const msg) {
+  KOKKOS_IF_ON_HOST((
+      (void)RuntimeCheckMemoryAccessViolation<MemorySpace,
+                                              DefaultHostExecutionSpace>(msg);))
+  KOKKOS_IF_ON_DEVICE((
+      (void)
+          RuntimeCheckMemoryAccessViolation<MemorySpace, DefaultExecutionSpace>(
+              msg);))
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+
+#if defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_CUDA) && \
+    defined(KOKKOS_ENABLE_CUDA)
+using ActiveExecutionMemorySpace KOKKOS_DEPRECATED = Kokkos::CudaSpace;
+#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_SYCL)
+using ActiveExecutionMemorySpace KOKKOS_DEPRECATED =
+    Kokkos::Experimental::SYCLDeviceUSMSpace;
+#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HIP_GPU)
+using ActiveExecutionMemorySpace KOKKOS_DEPRECATED =
+    Kokkos::Experimental::HIPSpace;
+#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST)
+using ActiveExecutionMemorySpace KOKKOS_DEPRECATED = Kokkos::HostSpace;
+#else
+using ActiveExecutionMemorySpace KOKKOS_DEPRECATED = void;
+#endif
+
+template <typename DstMemorySpace, typename SrcMemorySpace>
+struct MemorySpaceAccess;
+
+template <typename DstMemorySpace, typename SrcMemorySpace,
+          bool = Kokkos::Impl::MemorySpaceAccess<DstMemorySpace,
+                                                 SrcMemorySpace>::accessible>
+struct verify_space {
+  KOKKOS_DEPRECATED KOKKOS_FUNCTION static void check() {}
+};
+
+template <typename DstMemorySpace, typename SrcMemorySpace>
+struct verify_space<DstMemorySpace, SrcMemorySpace, false> {
+  KOKKOS_DEPRECATED KOKKOS_FUNCTION static void check() {
+    Kokkos::abort(
+        "Kokkos::View ERROR: attempt to access inaccessible memory space");
+  };
+};
+#endif
+
+}  // namespace Impl
+
+namespace Experimental {
+template <class, class, class, class>
+class LogicalMemorySpace;
+}
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+#define KOKKOS_RESTRICT_EXECUTION_TO_DATA(DATA_SPACE, DATA_PTR)        \
+  Kokkos::Impl::verify_space<Kokkos::Impl::ActiveExecutionMemorySpace, \
+                             DATA_SPACE>::check();
+
+#define KOKKOS_RESTRICT_EXECUTION_TO_(DATA_SPACE)                      \
+  Kokkos::Impl::verify_space<Kokkos::Impl::ActiveExecutionMemorySpace, \
+                             DATA_SPACE>::check();
+#endif
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+// Workaround for an ICE in Trilinos (in Sacado and Intrepid) in deep_copy.
+// See issue https://github.com/kokkos/kokkos/issues/5290
+// Simply taking the string by value did not resolve the issue.
+#ifdef KOKKOS_COMPILER_INTEL
+void fence();
+void fence(const std::string &name);
+#else
+void fence(const std::string &name = "Kokkos::fence: Unnamed Global Fence");
+#endif
+}  // namespace Kokkos
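+
+// Typical call (illustrative only): block until all outstanding work on all
+// execution space instances has completed.
+//
+//   Kokkos::fence("MyApp: wait for all kernels");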
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+template <class DataType, class... Properties>
+class View;
+
+namespace Impl {
+
+template <class DstSpace, class SrcSpace,
+          class ExecutionSpace = typename DstSpace::execution_space,
+          class Enable         = void>
+struct DeepCopy;
+
+template <typename ExecutionSpace, class DT, class... DP>
+struct ZeroMemset;
+
+template <class ViewType, class Layout = typename ViewType::array_layout,
+          class ExecSpace = typename ViewType::execution_space,
+          int Rank = ViewType::Rank, typename iType = int64_t>
+struct ViewFill;
+
+template <class ViewTypeA, class ViewTypeB, class Layout, class ExecSpace,
+          int Rank, typename iType>
+struct ViewCopy;
+
+template <class Functor, class Policy>
+struct FunctorPolicyExecutionSpace;
+
+//----------------------------------------------------------------------------
+/// \class ParallelFor
+/// \brief Implementation of the ParallelFor operator that has a
+///   partial specialization for the device.
+///
+/// This is an implementation detail of parallel_for.  Users should
+/// skip this and go directly to the nonmember function parallel_for.
+template <class FunctorType, class ExecPolicy,
+          class ExecutionSpace = typename Impl::FunctorPolicyExecutionSpace<
+              FunctorType, ExecPolicy>::execution_space>
+class ParallelFor;
+
+/// \class ParallelReduce
+/// \brief Implementation detail of parallel_reduce.
+///
+/// This is an implementation detail of parallel_reduce.  Users should
+/// skip this and go directly to the nonmember function parallel_reduce.
+template <class FunctorType, class ExecPolicy, class ReducerType = InvalidType,
+          class ExecutionSpace = typename Impl::FunctorPolicyExecutionSpace<
+              FunctorType, ExecPolicy>::execution_space>
+class ParallelReduce;
+
+/// \class ParallelScan
+/// \brief Implementation detail of parallel_scan.
+///
+/// This is an implementation detail of parallel_scan.  Users should
+/// skip this and go directly to the documentation of the nonmember
+/// template function Kokkos::parallel_scan.
+template <class FunctorType, class ExecPolicy,
+          class ExecutionSpace = typename Impl::FunctorPolicyExecutionSpace<
+              FunctorType, ExecPolicy>::execution_space>
+class ParallelScan;
+
+template <class FunctorType, class ExecPolicy, class ReturnType = InvalidType,
+          class ExecutionSpace = typename Impl::FunctorPolicyExecutionSpace<
+              FunctorType, ExecPolicy>::execution_space>
+class ParallelScanWithTotal;
+
+}  // namespace Impl
+
+template <class ScalarType, class Space = HostSpace>
+struct Sum;
+template <class ScalarType, class Space = HostSpace>
+struct Prod;
+template <class ScalarType, class Space = HostSpace>
+struct Min;
+template <class ScalarType, class Space = HostSpace>
+struct Max;
+template <class ScalarType, class Space = HostSpace>
+struct MinMax;
+template <class ScalarType, class Index, class Space = HostSpace>
+struct MinLoc;
+template <class ScalarType, class Index, class Space = HostSpace>
+struct MaxLoc;
+template <class ScalarType, class Index, class Space = HostSpace>
+struct MinMaxLoc;
+template <class ScalarType, class Space = HostSpace>
+struct BAnd;
+template <class ScalarType, class Space = HostSpace>
+struct BOr;
+template <class ScalarType, class Space = HostSpace>
+struct LAnd;
+template <class ScalarType, class Space = HostSpace>
+struct LOr;
+
+template <class Scalar, class Index, class Space = HostSpace>
+struct MaxFirstLoc;
+template <class Scalar, class Index, class ComparatorType,
+          class Space = HostSpace>
+struct MaxFirstLocCustomComparator;
+
+template <class Scalar, class Index, class Space = HostSpace>
+struct MinFirstLoc;
+template <class Scalar, class Index, class ComparatorType,
+          class Space = HostSpace>
+struct MinFirstLocCustomComparator;
+
+template <class Scalar, class Index, class Space = HostSpace>
+struct MinMaxFirstLastLoc;
+template <class Scalar, class Index, class ComparatorType,
+          class Space = HostSpace>
+struct MinMaxFirstLastLocCustomComparator;
+
+template <class Index, class Space = HostSpace>
+struct FirstLoc;
+template <class Index, class Space = HostSpace>
+struct LastLoc;
+template <class Index, class Space = HostSpace>
+struct StdIsPartitioned;
+template <class Index, class Space = HostSpace>
+struct StdPartitionPoint;
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_CORE_FWD
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_CORE_FWD
+#endif
+#endif /* #ifndef KOKKOS_CORE_FWD_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Crs.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Crs.hpp
new file mode 100644 (file)
index 0000000..9c0d1f6
--- /dev/null
@@ -0,0 +1,438 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_CRS_HPP
+#define KOKKOS_CRS_HPP
+
+#include <Kokkos_View.hpp>
+#include <Kokkos_CopyViews.hpp>
+
+namespace Kokkos {
+
+/// \class Crs
+/// \brief Compressed row storage array.
+///
+/// \tparam DataType The type of stored entries.  If a Crs is
+///   used as the graph of a sparse matrix, then this is usually an
+///   integer type, the type of the column indices in the sparse
+///   matrix.
+///
+/// \tparam Arg1Type The second template parameter, corresponding
+///   either to the Device type (if there are no more template
+///   parameters) or to the Layout type (if there is at least one more
+///   template parameter).
+///
+/// \tparam Arg2Type The third template parameter, which if provided
+///   corresponds to the Device type.
+///
+/// \tparam SizeType The type of row offsets.  Usually the default
+///   parameter suffices.  However, setting a nondefault value is
+///   necessary in some cases, for example, if you want to have
+///   sparse matrices with dimensions (and therefore column indices)
+///   that fit in \c int, but want to store more than <tt>INT_MAX</tt>
+///   entries in the sparse matrix.
+///
+/// A row has a range of entries:
+/// <ul>
+/// <li> <tt> row_map[i0] <= entry < row_map[i0+1] </tt> </li>
+/// <li> <tt> 0 <= i1 < row_map[i0+1] - row_map[i0] </tt> </li>
+/// <li> <tt> entries( entry ,            i2 , i3 , ... ); </tt> </li>
+/// <li> <tt> entries( row_map[i0] + i1 , i2 , i3 , ... ); </tt> </li>
+/// </ul>
+template <class DataType, class Arg1Type, class Arg2Type = void,
+          typename SizeType = typename ViewTraits<DataType*, Arg1Type, Arg2Type,
+                                                  void>::size_type>
+class Crs {
+ protected:
+  using traits = ViewTraits<DataType*, Arg1Type, Arg2Type, void>;
+
+ public:
+  using data_type       = DataType;
+  using array_layout    = typename traits::array_layout;
+  using execution_space = typename traits::execution_space;
+  using memory_space    = typename traits::memory_space;
+  using device_type     = typename traits::device_type;
+  using size_type       = SizeType;
+
+  using staticcrsgraph_type = Crs<DataType, Arg1Type, Arg2Type, SizeType>;
+  using HostMirror =
+      Crs<DataType, array_layout, typename traits::host_mirror_space, SizeType>;
+  using row_map_type = View<size_type*, array_layout, device_type>;
+  using entries_type = View<DataType*, array_layout, device_type>;
+
+  row_map_type row_map;
+  entries_type entries;
+
+  /*
+   * Default Constructors, operators and destructor
+   */
+  KOKKOS_DEFAULTED_FUNCTION Crs()           = default;
+  KOKKOS_DEFAULTED_FUNCTION Crs(Crs const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION Crs(Crs&&)      = default;
+  KOKKOS_DEFAULTED_FUNCTION Crs& operator=(Crs const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION Crs& operator=(Crs&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION ~Crs()                = default;
+
+  /** \brief  Construct from existing row map and entries views
+   *          (shallow copies; the views share ownership of the data).
+   */
+  template <class EntriesType, class RowMapType>
+  KOKKOS_INLINE_FUNCTION Crs(const RowMapType& row_map_,
+                             const EntriesType& entries_)
+      : row_map(row_map_), entries(entries_) {}
+
+  /**  \brief  Return number of rows in the graph
+   */
+  KOKKOS_INLINE_FUNCTION
+  size_type numRows() const {
+    return (row_map.extent(0) != 0)
+               ? row_map.extent(0) - static_cast<size_type>(1)
+               : static_cast<size_type>(0);
+  }
+};
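+
+// A minimal traversal sketch (illustrative only; `visit_row` is a placeholder
+// name), using the row-range invariant documented above: the entries of row i
+// live in [row_map(i), row_map(i + 1)).
+//
+//   template <class CrsType>
+//   KOKKOS_INLINE_FUNCTION void visit_row(CrsType const& g, int i) {
+//     for (auto e = g.row_map(i); e < g.row_map(i + 1); ++e) {
+//       auto column = g.entries(e);  // one entry of row i
+//       (void)column;
+//     }
+//   }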
+
+/*--------------------------------------------------------------------------*/
+
+template <class OutCounts, class DataType, class Arg1Type, class Arg2Type,
+          class SizeType>
+void get_crs_transpose_counts(
+    OutCounts& out, Crs<DataType, Arg1Type, Arg2Type, SizeType> const& in,
+    std::string const& name = "transpose_counts");
+
+template <class OutCounts, class InCrs>
+typename OutCounts::value_type get_crs_row_map_from_counts(
+    OutCounts& out, InCrs const& in, std::string const& name = "row_map");
+
+template <class DataType, class Arg1Type, class Arg2Type, class SizeType>
+void transpose_crs(Crs<DataType, Arg1Type, Arg2Type, SizeType>& out,
+                   Crs<DataType, Arg1Type, Arg2Type, SizeType> const& in);
+
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+template <class InCrs, class OutCounts>
+class GetCrsTransposeCounts {
+ public:
+  using execution_space = typename InCrs::execution_space;
+  using self_type       = GetCrsTransposeCounts<InCrs, OutCounts>;
+  using index_type      = typename InCrs::size_type;
+
+ private:
+  InCrs in;
+  OutCounts out;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  void operator()(index_type i) const { atomic_increment(&out[in.entries(i)]); }
+  GetCrsTransposeCounts(InCrs const& arg_in, OutCounts const& arg_out)
+      : in(arg_in), out(arg_out) {
+    using policy_type  = RangePolicy<index_type, execution_space>;
+    using closure_type = Kokkos::Impl::ParallelFor<self_type, policy_type>;
+    const closure_type closure(*this,
+                               policy_type(0, index_type(in.entries.size())));
+    closure.execute();
+    execution_space().fence(
+        "Kokkos::Impl::GetCrsTransposeCounts::GetCrsTransposeCounts: fence "
+        "after functor execution");
+  }
+};
+
+template <class InCounts, class OutRowMap>
+class CrsRowMapFromCounts {
+ public:
+  using execution_space = typename InCounts::execution_space;
+  using value_type      = typename OutRowMap::value_type;
+  using index_type      = typename InCounts::size_type;
+  using last_value_type =
+      Kokkos::View<value_type, typename InCounts::device_type>;
+
+ private:
+  InCounts m_in;
+  OutRowMap m_out;
+  last_value_type m_last_value;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  void operator()(index_type i, value_type& update, bool final_pass) const {
+    if (i < static_cast<index_type>(m_in.size())) {
+      update += m_in(i);
+      if (final_pass) m_out(i + 1) = update;
+    } else if (final_pass) {
+      m_out(0)       = 0;
+      m_last_value() = update;
+    }
+  }
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& update) const { update = 0; }
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& update, const value_type& input) const {
+    update += input;
+  }
+  using self_type = CrsRowMapFromCounts<InCounts, OutRowMap>;
+  CrsRowMapFromCounts(InCounts const& arg_in, OutRowMap const& arg_out)
+      : m_in(arg_in), m_out(arg_out), m_last_value("last_value") {}
+  value_type execute() {
+    using policy_type  = RangePolicy<index_type, execution_space>;
+    using closure_type = Kokkos::Impl::ParallelScan<self_type, policy_type>;
+    closure_type closure(*this, policy_type(0, m_in.size() + 1));
+    closure.execute();
+    auto last_value =
+        Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, m_last_value);
+    return last_value();
+  }
+};
+
+template <class InCrs, class OutCrs>
+class FillCrsTransposeEntries {
+ public:
+  using execution_space = typename InCrs::execution_space;
+  using memory_space    = typename InCrs::memory_space;
+  using value_type      = typename OutCrs::entries_type::value_type;
+  using index_type      = typename InCrs::size_type;
+
+ private:
+  using counters_type = View<index_type*, memory_space>;
+  InCrs in;
+  OutCrs out;
+  counters_type counters;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  void operator()(index_type i) const {
+    auto begin = in.row_map(i);
+    auto end   = in.row_map(i + 1);
+    for (auto j = begin; j < end; ++j) {
+      auto ti                  = in.entries(j);
+      auto tbegin              = out.row_map(ti);
+      auto tj                  = atomic_fetch_add(&counters(ti), 1);
+      out.entries(tbegin + tj) = i;
+    }
+  }
+  using self_type = FillCrsTransposeEntries<InCrs, OutCrs>;
+  FillCrsTransposeEntries(InCrs const& arg_in, OutCrs const& arg_out)
+      : in(arg_in), out(arg_out), counters("counters", arg_out.numRows()) {
+    using policy_type  = RangePolicy<index_type, execution_space>;
+    using closure_type = Kokkos::Impl::ParallelFor<self_type, policy_type>;
+    const closure_type closure(*this, policy_type(0, index_type(in.numRows())));
+    closure.execute();
+    execution_space().fence(
+        "Kokkos::Impl::FillCrsTransposeEntries::FillCrsTransposeEntries: fence "
+        "after functor execution");
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+template <class OutCounts, class DataType, class Arg1Type, class Arg2Type,
+          class SizeType>
+void get_crs_transpose_counts(
+    OutCounts& out, Crs<DataType, Arg1Type, Arg2Type, SizeType> const& in,
+    std::string const& name) {
+  using InCrs = Crs<DataType, Arg1Type, Arg2Type, SizeType>;
+  out         = OutCounts(name, in.numRows());
+  Kokkos::Impl::GetCrsTransposeCounts<InCrs, OutCounts> functor(in, out);
+}
+
+template <class OutRowMap, class InCounts>
+typename OutRowMap::value_type get_crs_row_map_from_counts(
+    OutRowMap& out, InCounts const& in, std::string const& name) {
+  out = OutRowMap(view_alloc(WithoutInitializing, name), in.size() + 1);
+  Kokkos::Impl::CrsRowMapFromCounts<InCounts, OutRowMap> functor(in, out);
+  return functor.execute();
+}
+
+template <class DataType, class Arg1Type, class Arg2Type, class SizeType>
+void transpose_crs(Crs<DataType, Arg1Type, Arg2Type, SizeType>& out,
+                   Crs<DataType, Arg1Type, Arg2Type, SizeType> const& in) {
+  using crs_type     = Crs<DataType, Arg1Type, Arg2Type, SizeType>;
+  using memory_space = typename crs_type::memory_space;
+  using counts_type  = View<SizeType*, memory_space>;
+  {
+    counts_type counts;
+    Kokkos::get_crs_transpose_counts(counts, in);
+    Kokkos::get_crs_row_map_from_counts(out.row_map, counts,
+                                        "transpose_row_map");
+  }
+  out.entries = decltype(out.entries)("transpose_entries", in.entries.size());
+  Kokkos::Impl::FillCrsTransposeEntries<crs_type, crs_type> entries_functor(
+      in, out);
+}
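+
+// A minimal usage sketch (illustrative only): transposing a graph.
+//
+//   using graph_type = Kokkos::Crs<int, Kokkos::DefaultExecutionSpace>;
+//   graph_type g = /* ... build a graph ... */;
+//   graph_type gt;
+//   Kokkos::transpose_crs(gt, g);  // gt now holds the transpose of g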
+
+template <class CrsType, class Functor,
+          class ExecutionSpace = typename CrsType::execution_space>
+struct CountAndFillBase;
+
+template <class CrsType, class Functor, class ExecutionSpace>
+struct CountAndFillBase {
+  using data_type    = typename CrsType::data_type;
+  using size_type    = typename CrsType::size_type;
+  using row_map_type = typename CrsType::row_map_type;
+  using counts_type  = row_map_type;
+  CrsType m_crs;
+  Functor m_functor;
+  counts_type m_counts;
+  struct Count {};
+  inline void operator()(Count, size_type i) const {
+    m_counts(i) = m_functor(i, nullptr);
+  }
+  struct Fill {};
+  inline void operator()(Fill, size_type i) const {
+    auto j = m_crs.row_map(i);
+    /* We don't want to access entries(entries.size()), even if it's just to
+       get its address and never use it. This can happen when row (i) is empty
+       and all rows after it are also empty. We could compare to
+       row_map(i + 1), but that is a read from global memory, whereas
+       dimension_0() should be part of the View in registers (or constant
+       memory). */
+    data_type* fill = (j == static_cast<decltype(j)>(m_crs.entries.extent(0)))
+                          ? nullptr
+                          : (&(m_crs.entries(j)));
+    m_functor(i, fill);
+  }
+  CountAndFillBase(CrsType& crs, Functor const& f) : m_crs(crs), m_functor(f) {}
+};
+
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
+#if defined(KOKKOS_ENABLE_CUDA)
+#define EXEC_SPACE Kokkos::Cuda
+#elif defined(KOKKOS_ENABLE_HIP)
+#define EXEC_SPACE Kokkos::Experimental::HIP
+#endif
+template <class CrsType, class Functor>
+struct CountAndFillBase<CrsType, Functor, EXEC_SPACE> {
+  using data_type    = typename CrsType::data_type;
+  using size_type    = typename CrsType::size_type;
+  using row_map_type = typename CrsType::row_map_type;
+  using counts_type  = row_map_type;
+  CrsType m_crs;
+  Functor m_functor;
+  counts_type m_counts;
+  struct Count {};
+  __device__ inline void operator()(Count, size_type i) const {
+    m_counts(i) = m_functor(i, nullptr);
+  }
+  struct Fill {};
+  __device__ inline void operator()(Fill, size_type i) const {
+    auto j = m_crs.row_map(i);
+    /* We don't want to access entries(entries.size()), even if it's just to
+       get its address and never use it. This can happen when row (i) is empty
+       and all rows after it are also empty. We could compare to
+       row_map(i + 1), but that is a read from global memory, whereas
+       dimension_0() should be part of the View in registers (or constant
+       memory). */
+    data_type* fill = (j == static_cast<decltype(j)>(m_crs.entries.extent(0)))
+                          ? nullptr
+                          : (&(m_crs.entries(j)));
+    m_functor(i, fill);
+  }
+  CountAndFillBase(CrsType& crs, Functor const& f) : m_crs(crs), m_functor(f) {}
+};
+#endif
+
+template <class CrsType, class Functor>
+struct CountAndFill : public CountAndFillBase<CrsType, Functor> {
+  using base_type = CountAndFillBase<CrsType, Functor>;
+  using typename base_type::Count;
+  using typename base_type::counts_type;
+  using typename base_type::data_type;
+  using typename base_type::Fill;
+  using typename base_type::size_type;
+  using entries_type = typename CrsType::entries_type;
+  using self_type    = CountAndFill<CrsType, Functor>;
+  CountAndFill(CrsType& crs, size_type nrows, Functor const& f)
+      : base_type(crs, f) {
+    using execution_space = typename CrsType::execution_space;
+    this->m_counts        = counts_type("counts", nrows);
+    {
+      using count_policy_type = RangePolicy<size_type, execution_space, Count>;
+      using count_closure_type =
+          Kokkos::Impl::ParallelFor<self_type, count_policy_type>;
+      const count_closure_type closure(*this, count_policy_type(0, nrows));
+      closure.execute();
+    }
+    auto nentries  = Kokkos::get_crs_row_map_from_counts(this->m_crs.row_map,
+                                                        this->m_counts);
+    this->m_counts = counts_type();
+    this->m_crs.entries = entries_type("entries", nentries);
+    {
+      using fill_policy_type = RangePolicy<size_type, execution_space, Fill>;
+      using fill_closure_type =
+          Kokkos::Impl::ParallelFor<self_type, fill_policy_type>;
+      const fill_closure_type closure(*this, fill_policy_type(0, nrows));
+      closure.execute();
+    }
+    crs = this->m_crs;
+  }
+};
+
+template <class CrsType, class Functor>
+void count_and_fill_crs(CrsType& crs, typename CrsType::size_type nrows,
+                        Functor const& f) {
+  Kokkos::CountAndFill<CrsType, Functor>(crs, nrows, f);
+}
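+
+// A minimal usage sketch (illustrative only; `nrows` is a placeholder): the
+// functor is invoked twice per row, first with fill == nullptr to count the
+// row's entries, then with a pointer to the row's storage to fill it.
+//
+//   Kokkos::Crs<int, Kokkos::DefaultExecutionSpace> graph;
+//   Kokkos::count_and_fill_crs(
+//       graph, nrows, KOKKOS_LAMBDA(int row, int* fill) {
+//         if (fill != nullptr) {
+//           fill[0] = row;  // e.g., a single diagonal entry per row
+//         }
+//         return 1;  // number of entries in this row
+//       });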
+
+}  // namespace Kokkos
+
+#endif /* #define KOKKOS_CRS_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Cuda.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Cuda.hpp
new file mode 100644 (file)
index 0000000..72a00f4
--- /dev/null
@@ -0,0 +1,325 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_CUDA_HPP
+#define KOKKOS_CUDA_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_CUDA)
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <iosfwd>
+#include <vector>
+
+#include <impl/Kokkos_AnalyzePolicy.hpp>
+#include <Kokkos_CudaSpace.hpp>
+#include <Cuda/Kokkos_Cuda_Error.hpp>  // CUDA_SAFE_CALL
+
+#include <Kokkos_Parallel.hpp>
+#include <Kokkos_TaskScheduler.hpp>
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <Kokkos_MemoryTraits.hpp>
+#include <impl/Kokkos_HostSharedPtr.hpp>
+#include <impl/Kokkos_InitializationSettings.hpp>
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+class CudaExec;
+class CudaInternal;
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+namespace Impl {
+namespace Experimental {
+enum class CudaLaunchMechanism : unsigned {
+  Default        = 0,
+  ConstantMemory = 1,
+  GlobalMemory   = 2,
+  LocalMemory    = 4
+};
+
+constexpr inline CudaLaunchMechanism operator|(CudaLaunchMechanism p1,
+                                               CudaLaunchMechanism p2) {
+  return static_cast<CudaLaunchMechanism>(static_cast<unsigned>(p1) |
+                                          static_cast<unsigned>(p2));
+}
+constexpr inline CudaLaunchMechanism operator&(CudaLaunchMechanism p1,
+                                               CudaLaunchMechanism p2) {
+  return static_cast<CudaLaunchMechanism>(static_cast<unsigned>(p1) &
+                                          static_cast<unsigned>(p2));
+}
+
+template <CudaLaunchMechanism l>
+struct CudaDispatchProperties {
+  CudaLaunchMechanism launch_mechanism = l;
+};
+}  // namespace Experimental
+}  // namespace Impl
+/// \class Cuda
+/// \brief Kokkos Execution Space that uses CUDA to run on GPUs.
+///
+/// An "execution space" represents a parallel execution model.  It tells Kokkos
+/// how to parallelize the execution of kernels in a parallel_for or
+/// parallel_reduce.  For example, the Threads execution space uses
+/// C++11 threads on a CPU, the OpenMP execution space uses the OpenMP language
+/// extensions, and the Serial execution space executes "parallel" kernels
+/// sequentially.  The Cuda execution space uses NVIDIA's CUDA programming
+/// model to execute kernels in parallel on GPUs.
+class Cuda {
+ public:
+  //! \name Type declarations that all Kokkos execution spaces must provide.
+  //@{
+
+  //! Tag this class as a kokkos execution space
+  using execution_space = Cuda;
+
+#if defined(KOKKOS_ENABLE_CUDA_UVM)
+  //! This execution space's preferred memory space.
+  using memory_space = CudaUVMSpace;
+#else
+  //! This execution space's preferred memory space.
+  using memory_space = CudaSpace;
+#endif
+
+  //! This execution space preferred device_type
+  using device_type = Kokkos::Device<execution_space, memory_space>;
+
+  //! The size_type best suited for this execution space.
+  using size_type = memory_space::size_type;
+
+  //! This execution space's preferred array layout.
+  using array_layout = LayoutLeft;
+
+  //!
+  using scratch_memory_space = ScratchMemorySpace<Cuda>;
+
+  //@}
+  //--------------------------------------------------
+  //! \name Functions that all Kokkos devices must implement.
+  //@{
+
+  /// \brief True if and only if this method is being called in a
+  ///   thread-parallel function.
+  KOKKOS_INLINE_FUNCTION static int in_parallel() {
+#if defined(__CUDA_ARCH__)
+    return true;
+#else
+    return false;
+#endif
+  }
+
+  /** \brief  Set the device in a "sleep" state.
+   *
+   * This function sets the device in a "sleep" state in which it is
+   * not ready for work.  This may consume less resources than if the
+   * device were in an "awake" state, but it may also take time to
+   * bring the device from a sleep state to be ready for work.
+   *
+   * \return True if the device is in the "sleep" state, else false if
+   *   the device is actively working and could not enter the "sleep"
+   *   state.
+   */
+  static bool sleep();
+
+  /// \brief Wake the device from the 'sleep' state so it is ready for work.
+  ///
+  /// \return True if the device is in the "ready" state, else false
+  ///  if the device is actively working (which also means that it is
+  ///  awake).
+  static bool wake();
+
+  /// \brief Wait until all dispatched functors complete.
+  ///
+  /// The parallel_for or parallel_reduce dispatch of a functor may
+  /// return asynchronously, before the functor completes.  This
+  /// method does not return until all dispatched functors on this
+  /// device have completed.
+  static void impl_static_fence(const std::string& name);
+
+  void fence(const std::string& name =
+                 "Kokkos::Cuda::fence(): Unnamed Instance Fence") const;
+
+  /** \brief  Return the maximum amount of concurrency.  */
+  static int concurrency();
+
+  //! Print configuration information to the given output stream.
+  void print_configuration(std::ostream& os, bool verbose = false) const;
+
+  //@}
+  //--------------------------------------------------
+  //! \name  Cuda space instances
+
+  Cuda();
+
+  Cuda(cudaStream_t stream, bool manage_stream = false);
+
+  //--------------------------------------------------------------------------
+  //! Free any resources being consumed by the device.
+  static void impl_finalize();
+
+  //! Has been initialized
+  static int impl_is_initialized();
+
+  //! Initialize, telling the CUDA run-time library which device to use.
+  static void impl_initialize(InitializationSettings const&);
+
+  /// \brief Cuda device architecture of the selected device.
+  ///
+  /// This matches the __CUDA_ARCH__ specification.
+  static size_type device_arch();
+
+  //! Query device count.
+  static size_type detect_device_count();
+
+  /** \brief  Detect the available devices and their architecture
+   *          as defined by the __CUDA_ARCH__ specification.
+   */
+  static std::vector<unsigned> detect_device_arch();
+
+  cudaStream_t cuda_stream() const;
+  int cuda_device() const;
+  const cudaDeviceProp& cuda_device_prop() const;
+
+  //@}
+  //--------------------------------------------------------------------------
+
+  static const char* name();
+
+  inline Impl::CudaInternal* impl_internal_space_instance() const {
+    return m_space_instance.get();
+  }
+  uint32_t impl_instance_id() const noexcept;
+
+ private:
+  Kokkos::Impl::HostSharedPtr<Impl::CudaInternal> m_space_instance;
+};
+
+namespace Tools {
+namespace Experimental {
+template <>
+struct DeviceTypeTraits<Cuda> {
+  /// \brief An ID to differentiate (for example) Serial from OpenMP in Tooling
+  static constexpr DeviceType id = DeviceType::Cuda;
+  static int device_id(const Cuda& exec) { return exec.cuda_device(); }
+};
+}  // namespace Experimental
+}  // namespace Tools
+
+namespace Impl {
+
+template <class DT, class... DP>
+struct ZeroMemset<Kokkos::Cuda, DT, DP...> {
+  ZeroMemset(const Kokkos::Cuda& exec_space_instance,
+             const View<DT, DP...>& dst,
+             typename View<DT, DP...>::const_value_type&) {
+    KOKKOS_IMPL_CUDA_SAFE_CALL(cudaMemsetAsync(
+        dst.data(), 0,
+        dst.size() * sizeof(typename View<DT, DP...>::value_type),
+        exec_space_instance.cuda_stream()));
+  }
+
+  ZeroMemset(const View<DT, DP...>& dst,
+             typename View<DT, DP...>::const_value_type&) {
+    KOKKOS_IMPL_CUDA_SAFE_CALL(
+        cudaMemset(dst.data(), 0,
+                   dst.size() * sizeof(typename View<DT, DP...>::value_type)));
+  }
+};
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+struct MemorySpaceAccess<Kokkos::CudaSpace,
+                         Kokkos::Cuda::scratch_memory_space> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = false };
+};
+
+#if defined(KOKKOS_ENABLE_CUDA_UVM)
+
+// If forcing use of UVM everywhere,
+// then we must assume that CudaUVMSpace
+// can be a stand-in for CudaSpace.
+// This will fail when a strange host-side execution space
+// defines CudaUVMSpace as its preferred memory space.
+
+template <>
+struct MemorySpaceAccess<Kokkos::CudaUVMSpace,
+                         Kokkos::Cuda::scratch_memory_space> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = false };
+};
+
+#endif
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif /* #if defined( KOKKOS_ENABLE_CUDA ) */
+#endif /* #ifndef KOKKOS_CUDA_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_CudaSpace.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_CudaSpace.hpp
new file mode 100644 (file)
index 0000000..7ec78c0
--- /dev/null
@@ -0,0 +1,797 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_CUDASPACE_HPP
+#define KOKKOS_CUDASPACE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_CUDA)
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <iosfwd>
+#include <typeinfo>
+#include <string>
+#include <memory>
+
+#include <Kokkos_HostSpace.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+
+#include <impl/Kokkos_Profiling_Interface.hpp>
+
+#include <Cuda/Kokkos_Cuda_abort.hpp>
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
+extern "C" bool kokkos_impl_cuda_pin_uvm_to_host();
+extern "C" void kokkos_impl_cuda_set_pin_uvm_to_host(bool);
+#endif
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename T>
+struct is_cuda_type_space : public std::false_type {};
+
+}  // namespace Impl
+
+/** \brief  Cuda on-device memory management */
+
+class CudaSpace {
+ public:
+  //! Tag this class as a kokkos memory space
+  using memory_space    = CudaSpace;
+  using execution_space = Kokkos::Cuda;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+
+  using size_type = unsigned int;
+
+  /*--------------------------------*/
+
+  CudaSpace();
+  CudaSpace(CudaSpace&& rhs)      = default;
+  CudaSpace(const CudaSpace& rhs) = default;
+  CudaSpace& operator=(CudaSpace&& rhs) = default;
+  CudaSpace& operator=(const CudaSpace& rhs) = default;
+  ~CudaSpace()                               = default;
+
+  /**\brief  Allocate untracked memory in the cuda space */
+  void* allocate(const Cuda& exec_space, const size_t arg_alloc_size) const;
+  void* allocate(const Cuda& exec_space, const char* arg_label,
+                 const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+  void* allocate(const size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  /**\brief  Deallocate untracked memory in the cuda space */
+  void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
+
+ private:
+  template <class, class, class, class>
+  friend class Kokkos::Experimental::LogicalMemorySpace;
+  void* impl_allocate(const Cuda& exec_space, const char* arg_label,
+                      const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      const Kokkos::Tools::SpaceHandle =
+                          Kokkos::Tools::make_space_handle(name())) const;
+  void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      const Kokkos::Tools::SpaceHandle =
+                          Kokkos::Tools::make_space_handle(name())) const;
+  void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                       const size_t arg_alloc_size,
+                       const size_t arg_logical_size = 0,
+                       const Kokkos::Tools::SpaceHandle =
+                           Kokkos::Tools::make_space_handle(name())) const;
+
+ public:
+  /**\brief Return Name of the MemorySpace */
+  static constexpr const char* name() { return m_name; }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  /*--------------------------------*/
+  /** \brief  Error reporting for HostSpace attempt to access CudaSpace */
+  KOKKOS_DEPRECATED static void access_error();
+  KOKKOS_DEPRECATED static void access_error(const void* const);
+#endif
+
+ private:
+  int m_device;  ///< Which Cuda device
+
+  static constexpr const char* m_name = "Cuda";
+  friend class Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>;
+};
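+
+// Usage sketch (illustrative; assumes an initialized Kokkos runtime with the
+// CUDA backend enabled — label and size are placeholders):
+//
+//   Kokkos::CudaSpace space;
+//   void* ptr = space.allocate("scratch", 1024);  // untracked device memory
+//   // ... use ptr from device code ...
+//   space.deallocate("scratch", ptr, 1024);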
+
+template <>
+struct Impl::is_cuda_type_space<CudaSpace> : public std::true_type {};
+
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+/** \brief  Cuda memory that is accessible to Host execution space
+ *          through Cuda's unified virtual memory (UVM) runtime.
+ */
+class CudaUVMSpace {
+ public:
+  //! Tag this class as a kokkos memory space
+  using memory_space    = CudaUVMSpace;
+  using execution_space = Cuda;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+  using size_type       = unsigned int;
+
+  /** \brief  If UVM capability is available */
+  static bool available();
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  /*--------------------------------*/
+  /** \brief  CudaUVMSpace specific routine */
+  KOKKOS_DEPRECATED static int number_of_allocations();
+#endif
+
+  /*--------------------------------*/
+
+  /*--------------------------------*/
+
+  CudaUVMSpace();
+  CudaUVMSpace(CudaUVMSpace&& rhs)      = default;
+  CudaUVMSpace(const CudaUVMSpace& rhs) = default;
+  CudaUVMSpace& operator=(CudaUVMSpace&& rhs) = default;
+  CudaUVMSpace& operator=(const CudaUVMSpace& rhs) = default;
+  ~CudaUVMSpace()                                  = default;
+
+  /**\brief  Allocate untracked memory in the cuda space */
+  void* allocate(const size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  /**\brief  Deallocate untracked memory in the cuda space */
+  void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
+
+ private:
+  template <class, class, class, class>
+  friend class Kokkos::Experimental::LogicalMemorySpace;
+  void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      const Kokkos::Tools::SpaceHandle =
+                          Kokkos::Tools::make_space_handle(name())) const;
+  void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                       const size_t arg_alloc_size,
+                       const size_t arg_logical_size = 0,
+                       const Kokkos::Tools::SpaceHandle =
+                           Kokkos::Tools::make_space_handle(name())) const;
+
+ public:
+  /**\brief Return Name of the MemorySpace */
+  static constexpr const char* name() { return m_name; }
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
+  static bool cuda_pin_uvm_to_host();
+  static void cuda_set_pin_uvm_to_host(bool val);
+#endif
+  /*--------------------------------*/
+
+ private:
+  int m_device;  ///< Which Cuda device
+
+#ifdef KOKKOS_IMPL_DEBUG_CUDA_PIN_UVM_TO_HOST
+  static bool kokkos_impl_cuda_pin_uvm_to_host_v;
+#endif
+  static constexpr const char* m_name = "CudaUVM";
+};
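+
+// Usage sketch (illustrative): UVM memory is reachable from both host and
+// device, so a view in this space can be touched on either side (fences
+// between host and device access are elided here):
+//
+//   Kokkos::View<double*, Kokkos::CudaUVMSpace> v("v", n);
+//   v(0) = 1.0;  // direct host access through UVM
+//   Kokkos::parallel_for(n, KOKKOS_LAMBDA(const int i) { v(i) *= 2.0; });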
+
+template <>
+struct Impl::is_cuda_type_space<CudaUVMSpace> : public std::true_type {};
+
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+/** \brief  Host memory that is accessible to Cuda execution space
+ *          through Cuda's host-pinned memory allocation.
+ */
+class CudaHostPinnedSpace {
+ public:
+  //! Tag this class as a kokkos memory space
+  /** \brief  Memory is in HostSpace so use the HostSpace::execution_space */
+  using execution_space = HostSpace::execution_space;
+  using memory_space    = CudaHostPinnedSpace;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+  using size_type       = unsigned int;
+
+  /*--------------------------------*/
+
+  CudaHostPinnedSpace();
+  CudaHostPinnedSpace(CudaHostPinnedSpace&& rhs)      = default;
+  CudaHostPinnedSpace(const CudaHostPinnedSpace& rhs) = default;
+  CudaHostPinnedSpace& operator=(CudaHostPinnedSpace&& rhs) = default;
+  CudaHostPinnedSpace& operator=(const CudaHostPinnedSpace& rhs) = default;
+  ~CudaHostPinnedSpace()                                         = default;
+
+  /**\brief  Allocate untracked memory in the space */
+  void* allocate(const size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  /**\brief  Deallocate untracked memory in the space */
+  void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
+
+ private:
+  template <class, class, class, class>
+  friend class Kokkos::Experimental::LogicalMemorySpace;
+  void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      const Kokkos::Tools::SpaceHandle =
+                          Kokkos::Tools::make_space_handle(name())) const;
+  void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                       const size_t arg_alloc_size,
+                       const size_t arg_logical_size = 0,
+                       const Kokkos::Tools::SpaceHandle =
+                           Kokkos::Tools::make_space_handle(name())) const;
+
+ public:
+  /**\brief Return Name of the MemorySpace */
+  static constexpr const char* name() { return m_name; }
+
+ private:
+  static constexpr const char* m_name = "CudaHostPinned";
+
+  /*--------------------------------*/
+};
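+
+// Usage sketch (illustrative): allocations in this space live in host memory
+// but are pinned, which enables asynchronous transfers to and from the
+// device:
+//
+//   Kokkos::View<double*, Kokkos::CudaHostPinnedSpace> staging("staging", n);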
+
+template <>
+struct Impl::is_cuda_type_space<CudaHostPinnedSpace> : public std::true_type {};
+
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+cudaStream_t cuda_get_deep_copy_stream();
+
+const std::unique_ptr<Kokkos::Cuda>& cuda_get_deep_copy_space(
+    bool initialize = true);
+
+static_assert(Kokkos::Impl::MemorySpaceAccess<Kokkos::CudaSpace,
+                                              Kokkos::CudaSpace>::assignable,
+              "");
+static_assert(Kokkos::Impl::MemorySpaceAccess<Kokkos::CudaUVMSpace,
+                                              Kokkos::CudaUVMSpace>::assignable,
+              "");
+static_assert(
+    Kokkos::Impl::MemorySpaceAccess<Kokkos::CudaHostPinnedSpace,
+                                    Kokkos::CudaHostPinnedSpace>::assignable,
+    "");
+
+//----------------------------------------
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace, Kokkos::CudaSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace, Kokkos::CudaUVMSpace> {
+  // HostSpace::execution_space != CudaUVMSpace::execution_space
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace, Kokkos::CudaHostPinnedSpace> {
+  // HostSpace::execution_space == CudaHostPinnedSpace::execution_space
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+
+template <>
+struct MemorySpaceAccess<Kokkos::CudaSpace, Kokkos::HostSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::CudaSpace, Kokkos::CudaUVMSpace> {
+  // CudaSpace::execution_space == CudaUVMSpace::execution_space
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::CudaSpace, Kokkos::CudaHostPinnedSpace> {
+  // CudaSpace::execution_space != CudaHostPinnedSpace::execution_space
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };  // CudaSpace::execution_space
+  enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+// CudaUVMSpace::execution_space == Cuda
+// CudaUVMSpace accessible to both Cuda and Host
+
+template <>
+struct MemorySpaceAccess<Kokkos::CudaUVMSpace, Kokkos::HostSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };  // Cuda cannot access HostSpace
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::CudaUVMSpace, Kokkos::CudaSpace> {
+  // CudaUVMSpace::execution_space == CudaSpace::execution_space
+  // Can access CudaUVMSpace from Host but cannot access CudaSpace from Host
+  enum : bool { assignable = false };
+
+  // CudaUVMSpace::execution_space can access CudaSpace
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::CudaUVMSpace, Kokkos::CudaHostPinnedSpace> {
+  // CudaUVMSpace::execution_space != CudaHostPinnedSpace::execution_space
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };  // CudaUVMSpace::execution_space
+  enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+// CudaHostPinnedSpace::execution_space == HostSpace::execution_space
+// CudaHostPinnedSpace accessible to both Cuda and Host
+
+template <>
+struct MemorySpaceAccess<Kokkos::CudaHostPinnedSpace, Kokkos::HostSpace> {
+  enum : bool { assignable = false };  // Cannot access from Cuda
+  enum : bool { accessible = true };   // CudaHostPinnedSpace::execution_space
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::CudaHostPinnedSpace, Kokkos::CudaSpace> {
+  enum : bool { assignable = false };  // Cannot access from Host
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::CudaHostPinnedSpace, Kokkos::CudaUVMSpace> {
+  enum : bool { assignable = false };  // different execution_space
+  enum : bool { accessible = true };   // same accessibility
+  enum : bool { deepcopy = true };
+};
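+
+// Illustrative consequences of the specializations above, expressed as
+// compile-time checks (a sketch derived directly from the traits):
+//
+//   static_assert(Kokkos::Impl::MemorySpaceAccess<
+//                     Kokkos::HostSpace, Kokkos::CudaUVMSpace>::accessible, "");
+//   static_assert(!Kokkos::Impl::MemorySpaceAccess<
+//                     Kokkos::HostSpace, Kokkos::CudaSpace>::accessible, "");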
+
+//----------------------------------------
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+void DeepCopyCuda(void* dst, const void* src, size_t n);
+void DeepCopyAsyncCuda(const Cuda& instance, void* dst, const void* src,
+                       size_t n);
+void DeepCopyAsyncCuda(void* dst, const void* src, size_t n);
+
+template <class MemSpace>
+struct DeepCopy<MemSpace, HostSpace, Cuda,
+                std::enable_if_t<is_cuda_type_space<MemSpace>::value>> {
+  DeepCopy(void* dst, const void* src, size_t n) { DeepCopyCuda(dst, src, n); }
+  DeepCopy(const Cuda& instance, void* dst, const void* src, size_t n) {
+    DeepCopyAsyncCuda(instance, dst, src, n);
+  }
+};
+
+template <class MemSpace>
+struct DeepCopy<HostSpace, MemSpace, Cuda,
+                std::enable_if_t<is_cuda_type_space<MemSpace>::value>> {
+  DeepCopy(void* dst, const void* src, size_t n) { DeepCopyCuda(dst, src, n); }
+  DeepCopy(const Cuda& instance, void* dst, const void* src, size_t n) {
+    DeepCopyAsyncCuda(instance, dst, src, n);
+  }
+};
+
+template <class MemSpace1, class MemSpace2>
+struct DeepCopy<MemSpace1, MemSpace2, Cuda,
+                std::enable_if_t<is_cuda_type_space<MemSpace1>::value &&
+                                 is_cuda_type_space<MemSpace2>::value>> {
+  DeepCopy(void* dst, const void* src, size_t n) { DeepCopyCuda(dst, src, n); }
+  DeepCopy(const Cuda& instance, void* dst, const void* src, size_t n) {
+    DeepCopyAsyncCuda(instance, dst, src, n);
+  }
+};
+
+template <class MemSpace1, class MemSpace2, class ExecutionSpace>
+struct DeepCopy<MemSpace1, MemSpace2, ExecutionSpace,
+                std::enable_if_t<is_cuda_type_space<MemSpace1>::value &&
+                                 is_cuda_type_space<MemSpace2>::value &&
+                                 !std::is_same<ExecutionSpace, Cuda>::value>> {
+  inline DeepCopy(void* dst, const void* src, size_t n) {
+    DeepCopyCuda(dst, src, n);
+  }
+
+  inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+                  size_t n) {
+    exec.fence(fence_string());
+    DeepCopyAsyncCuda(dst, src, n);
+  }
+
+ private:
+  static const std::string& fence_string() {
+    static const std::string string =
+        std::string("Kokkos::Impl::DeepCopy<") + MemSpace1::name() + "Space, " +
+        MemSpace2::name() +
+        "Space, ExecutionSpace>::DeepCopy: fence before copy";
+    return string;
+  }
+};
+
+template <class MemSpace, class ExecutionSpace>
+struct DeepCopy<MemSpace, HostSpace, ExecutionSpace,
+                std::enable_if_t<is_cuda_type_space<MemSpace>::value &&
+                                 !std::is_same<ExecutionSpace, Cuda>::value>> {
+  inline DeepCopy(void* dst, const void* src, size_t n) {
+    DeepCopyCuda(dst, src, n);
+  }
+
+  inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+                  size_t n) {
+    exec.fence(fence_string());
+    DeepCopyAsyncCuda(dst, src, n);
+  }
+
+ private:
+  static const std::string& fence_string() {
+    static const std::string string =
+        std::string("Kokkos::Impl::DeepCopy<") + MemSpace::name() +
+        "Space, HostSpace, ExecutionSpace>::DeepCopy: fence before copy";
+    return string;
+  }
+};
+
+template <class MemSpace, class ExecutionSpace>
+struct DeepCopy<HostSpace, MemSpace, ExecutionSpace,
+                std::enable_if_t<is_cuda_type_space<MemSpace>::value &&
+                                 !std::is_same<ExecutionSpace, Cuda>::value>> {
+  inline DeepCopy(void* dst, const void* src, size_t n) {
+    DeepCopyCuda(dst, src, n);
+  }
+
+  inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+                  size_t n) {
+    exec.fence(fence_string());
+    DeepCopyAsyncCuda(dst, src, n);
+  }
+
+ private:
+  static const std::string& fence_string() {
+    static const std::string string =
+        std::string("Kokkos::Impl::DeepCopy<HostSpace, ") + MemSpace::name() +
+        "Space, ExecutionSpace>::DeepCopy: fence before copy";
+    return string;
+  }
+};
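+
+// Usage sketch (illustrative): these specializations are what a plain
+// Kokkos::deep_copy between CUDA-related spaces roughly dispatches to:
+//
+//   Kokkos::View<double*, Kokkos::CudaSpace> dev("dev", n);
+//   auto host = Kokkos::create_mirror_view(dev);  // HostSpace mirror
+//   Kokkos::deep_copy(dev, host);  // ~ DeepCopy<CudaSpace, HostSpace, ...>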
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+class SharedAllocationRecord<Kokkos::CudaSpace, void>
+    : public HostInaccessibleSharedAllocationRecordCommon<Kokkos::CudaSpace> {
+ private:
+  friend class SharedAllocationRecord<Kokkos::CudaUVMSpace, void>;
+  friend class SharedAllocationRecordCommon<Kokkos::CudaSpace>;
+  friend class HostInaccessibleSharedAllocationRecordCommon<Kokkos::CudaSpace>;
+
+  using RecordBase = SharedAllocationRecord<void, void>;
+  using base_t =
+      HostInaccessibleSharedAllocationRecordCommon<Kokkos::CudaSpace>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+  static ::cudaTextureObject_t attach_texture_object(
+      const unsigned sizeof_alias, void* const alloc_ptr,
+      const size_t alloc_size);
+
+#ifdef KOKKOS_ENABLE_DEBUG
+  static RecordBase s_root_record;
+#endif
+
+  ::cudaTextureObject_t m_tex_obj = 0;
+  const Kokkos::CudaSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord();
+  SharedAllocationRecord() = default;
+
+  // This constructor does not forward to the one without exec_space arg
+  // in order to work around https://github.com/kokkos/kokkos/issues/5258
+  // Because this constructor is templated, it cannot be defined in the
+  // .cpp file like the other constructors.
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /*exec_space*/, const Kokkos::CudaSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate)
+      : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+            &SharedAllocationRecord<Kokkos::CudaSpace, void>::s_root_record,
+#endif
+            Impl::checked_allocation_with_header(arg_space, arg_label,
+                                                 arg_alloc_size),
+            sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+            arg_label),
+        m_tex_obj(0),
+        m_space(arg_space) {
+
+    SharedAllocationHeader header;
+
+    this->base_t::_fill_host_accessible_header_info(header, arg_label);
+
+    // Copy to device memory
+    // workaround for issue with NVCC and MSVC
+    // https://github.com/kokkos/kokkos/issues/5258
+    deep_copy_header_no_exec(RecordBase::m_alloc_ptr, &header);
+  }
+
+  SharedAllocationRecord(
+      const Kokkos::Cuda& exec_space, const Kokkos::CudaSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+
+  SharedAllocationRecord(
+      const Kokkos::CudaSpace& arg_space, const std::string& arg_label,
+      const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+
+  // helper function to work around MSVC+NVCC issue
+  // https://github.com/kokkos/kokkos/issues/5258
+  static void deep_copy_header_no_exec(void*, const void*);
+
+ public:
+  template <typename AliasType>
+  inline ::cudaTextureObject_t attach_texture_object() {
+    static_assert((std::is_same<AliasType, int>::value ||
+                   std::is_same<AliasType, ::int2>::value ||
+                   std::is_same<AliasType, ::int4>::value),
+                  "Cuda texture fetch only supported for alias types of int, "
+                  "::int2, or ::int4");
+
+    if (m_tex_obj == 0) {
+      m_tex_obj = attach_texture_object(sizeof(AliasType),
+                                        (void*)RecordBase::m_alloc_ptr,
+                                        RecordBase::m_alloc_size);
+    }
+
+    return m_tex_obj;
+  }
+
+  template <typename AliasType>
+  inline int attach_texture_object_offset(const AliasType* const ptr) {
+    // Texture object is attached to the entire allocation range
+    return ptr - reinterpret_cast<AliasType*>(RecordBase::m_alloc_ptr);
+  }
+};
+
+template <>
+class SharedAllocationRecord<Kokkos::CudaUVMSpace, void>
+    : public SharedAllocationRecordCommon<Kokkos::CudaUVMSpace> {
+ private:
+  friend class SharedAllocationRecordCommon<Kokkos::CudaUVMSpace>;
+
+  using base_t     = SharedAllocationRecordCommon<Kokkos::CudaUVMSpace>;
+  using RecordBase = SharedAllocationRecord<void, void>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+  static RecordBase s_root_record;
+
+  ::cudaTextureObject_t m_tex_obj = 0;
+  const Kokkos::CudaUVMSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord();
+  SharedAllocationRecord() = default;
+
+  // This constructor does not forward to the one without exec_space arg
+  // in order to work around https://github.com/kokkos/kokkos/issues/5258
+  // Because this constructor is templated, it cannot be defined in the
+  // .cpp file like the other constructors.
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /*exec_space*/,
+      const Kokkos::CudaUVMSpace& arg_space, const std::string& arg_label,
+      const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate)
+      : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+            &SharedAllocationRecord<Kokkos::CudaUVMSpace, void>::s_root_record,
+#endif
+            Impl::checked_allocation_with_header(arg_space, arg_label,
+                                                 arg_alloc_size),
+            sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+            arg_label),
+        m_tex_obj(0),
+        m_space(arg_space) {
+    this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
+                                                    arg_label);
+  }
+
+  SharedAllocationRecord(
+      const Kokkos::CudaUVMSpace& arg_space, const std::string& arg_label,
+      const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+
+ public:
+  template <typename AliasType>
+  inline ::cudaTextureObject_t attach_texture_object() {
+    static_assert((std::is_same<AliasType, int>::value ||
+                   std::is_same<AliasType, ::int2>::value ||
+                   std::is_same<AliasType, ::int4>::value),
+                  "Cuda texture fetch only supported for alias types of int, "
+                  "::int2, or ::int4");
+
+    if (m_tex_obj == 0) {
+      m_tex_obj = SharedAllocationRecord<Kokkos::CudaSpace, void>::
+          attach_texture_object(sizeof(AliasType),
+                                (void*)RecordBase::m_alloc_ptr,
+                                RecordBase::m_alloc_size);
+    }
+
+    return m_tex_obj;
+  }
+
+  template <typename AliasType>
+  inline int attach_texture_object_offset(const AliasType* const ptr) {
+    // Texture object is attached to the entire allocation range
+    return ptr - reinterpret_cast<AliasType*>(RecordBase::m_alloc_ptr);
+  }
+};
+
+template <>
+class SharedAllocationRecord<Kokkos::CudaHostPinnedSpace, void>
+    : public SharedAllocationRecordCommon<Kokkos::CudaHostPinnedSpace> {
+ private:
+  friend class SharedAllocationRecordCommon<Kokkos::CudaHostPinnedSpace>;
+
+  using RecordBase = SharedAllocationRecord<void, void>;
+  using base_t     = SharedAllocationRecordCommon<Kokkos::CudaHostPinnedSpace>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+  static RecordBase s_root_record;
+
+  const Kokkos::CudaHostPinnedSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord();
+  SharedAllocationRecord() = default;
+
+  // This constructor does not forward to the one without exec_space arg
+  // in order to work around https://github.com/kokkos/kokkos/issues/5258
+  // Because this constructor is templated, it cannot be defined in the
+  // .cpp file like the other constructors.
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /*exec_space*/,
+      const Kokkos::CudaHostPinnedSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate)
+      : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+            &SharedAllocationRecord<Kokkos::CudaHostPinnedSpace,
+                                    void>::s_root_record,
+#endif
+            Impl::checked_allocation_with_header(arg_space, arg_label,
+                                                 arg_alloc_size),
+            sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+            arg_label),
+        m_space(arg_space) {
+    this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
+                                                    arg_label);
+  }
+
+  SharedAllocationRecord(
+      const Kokkos::CudaHostPinnedSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_CUDA ) */
+#endif /* #define KOKKOS_CUDASPACE_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_DetectionIdiom.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_DetectionIdiom.hpp
new file mode 100644 (file)
index 0000000..d456938
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+#ifndef KOKKOS_DETECTION_IDIOM_HPP
+#define KOKKOS_DETECTION_IDIOM_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DETECTIONIDIOM
+#endif
+
+#include <impl/Kokkos_Utilities.hpp>  // void_t
+#include <type_traits>
+
+// NOTE This header implements the detection idiom from Version 2 of the C++
+// Extensions for Library Fundamentals, ISO/IEC TS 19568:2017
+
+// detected_or is deliberately omitted here: it does not fit well with the
+// rest of the specification and arguably should be removed from the TS.
+
+namespace Kokkos {
+
+namespace Impl {
+// base class for nonesuch to inherit from so it is not an aggregate
+struct nonesuch_base {};
+
+// primary template handles all types not supporting the archetypal Op
+template <class Default, class /*AlwaysVoid*/, template <class...> class Op,
+          class... /*Args*/>
+struct detector {
+  using value_t = std::false_type;
+  using type    = Default;
+};
+
+// specialization recognizes and handles only types supporting Op
+template <class Default, template <class...> class Op, class... Args>
+struct detector<Default, void_t<Op<Args...>>, Op, Args...> {
+  using value_t = std::true_type;
+  using type    = Op<Args...>;
+};
+}  // namespace Impl
+
+struct nonesuch : private Impl::nonesuch_base {
+  ~nonesuch()               = delete;
+  nonesuch(nonesuch const&) = delete;
+  void operator=(nonesuch const&) = delete;
+};
+
+template <template <class...> class Op, class... Args>
+using is_detected =
+    typename Impl::detector<nonesuch, void, Op, Args...>::value_t;
+
+template <template <class...> class Op, class... Args>
+using detected_t = typename Impl::detector<nonesuch, void, Op, Args...>::type;
+
+template <class Default, template <class...> class Op, class... Args>
+using detected_or_t = typename Impl::detector<Default, void, Op, Args...>::type;
+
+template <class Expected, template <class...> class Op, class... Args>
+using is_detected_exact = std::is_same<Expected, detected_t<Op, Args...>>;
+
+template <class To, template <class...> class Op, class... Args>
+using is_detected_convertible =
+    std::is_convertible<detected_t<Op, Args...>, To>;
+
+#ifdef KOKKOS_ENABLE_CXX17
+template <template <class...> class Op, class... Args>
+inline constexpr bool is_detected_v = is_detected<Op, Args...>::value;
+
+template <class Expected, template <class...> class Op, class... Args>
+inline constexpr bool is_detected_exact_v =
+    is_detected_exact<Expected, Op, Args...>::value;
+
+template <class Expected, template <class...> class Op, class... Args>
+inline constexpr bool is_detected_convertible_v =
+    is_detected_convertible<Expected, Op, Args...>::value;
+#endif
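+
+// Usage sketch (illustrative): detecting whether a type has a member
+// function size() by naming its return type as an archetypal Op:
+//
+//   template <class T>
+//   using size_op = decltype(std::declval<T>().size());
+//
+//   static_assert(Kokkos::is_detected<size_op, std::string>::value, "");
+//   static_assert(!Kokkos::is_detected<size_op, int>::value, "");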
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DETECTIONIDIOM
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_DETECTIONIDIOM
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_ExecPolicy.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_ExecPolicy.hpp
new file mode 100644 (file)
index 0000000..4cd57ba
--- /dev/null
@@ -0,0 +1,945 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_EXECPOLICY_HPP
+#define KOKKOS_EXECPOLICY_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+#include <impl/Kokkos_Traits.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_AnalyzePolicy.hpp>
+#include <Kokkos_Concepts.hpp>
+#include <typeinfo>
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+struct ParallelForTag {};
+struct ParallelScanTag {};
+struct ParallelReduceTag {};
+
+struct ChunkSize {
+  int value;
+  ChunkSize(int value_) : value(value_) {}
+};
+
+/** \brief  Execution policy for work over a range of an integral type.
+ *
+ * Valid template argument options:
+ *
+ *  With a specified execution space:
+ *    < ExecSpace , WorkTag , { IntConst | IntType } >
+ *    < ExecSpace , WorkTag , void >
+ *    < ExecSpace , { IntConst | IntType } , void >
+ *    < ExecSpace , void , void >
+ *
+ *  With the default execution space:
+ *    < WorkTag , { IntConst | IntType } , void >
+ *    < WorkTag , void , void >
+ *    < { IntConst | IntType } , void , void >
+ *    < void , void , void >
+ *
+ *  IntType  is a fundamental integral type
+ *  IntConst is an Impl::integral_constant< IntType , Blocking >
+ *
+ *  Blocking is the granularity of partitioning the range among threads.
+ */
+template <class... Properties>
+class RangePolicy : public Impl::PolicyTraits<Properties...> {
+ public:
+  using traits = Impl::PolicyTraits<Properties...>;
+
+ private:
+  typename traits::execution_space m_space;
+  typename traits::index_type m_begin;
+  typename traits::index_type m_end;
+  typename traits::index_type m_granularity;
+  typename traits::index_type m_granularity_mask;
+
+  template <class... OtherProperties>
+  friend class RangePolicy;
+
+ public:
+  //! Tag this class as an execution policy
+  using execution_policy = RangePolicy<Properties...>;
+  using member_type      = typename traits::index_type;
+  using index_type       = typename traits::index_type;
+
+  KOKKOS_INLINE_FUNCTION const typename traits::execution_space& space() const {
+    return m_space;
+  }
+  KOKKOS_INLINE_FUNCTION member_type begin() const { return m_begin; }
+  KOKKOS_INLINE_FUNCTION member_type end() const { return m_end; }
+
+  // TODO: find a better workaround for Clang's weird instantiation order.
+  // This operator is here because of an instantiation error: the RangePolicy
+  // is inserted into FunctorValueTraits, which tries decltype on the
+  // operator, even though the first argument of parallel_for clearly
+  // doesn't match.
+  void operator()(const int&) const {}
+
+  template <class... OtherProperties>
+  RangePolicy(const RangePolicy<OtherProperties...>& p)
+      : traits(p),  // base class may contain data such as desired occupancy
+        m_space(p.m_space),
+        m_begin(p.m_begin),
+        m_end(p.m_end),
+        m_granularity(p.m_granularity),
+        m_granularity_mask(p.m_granularity_mask) {}
+
+  inline RangePolicy()
+      : m_space(),
+        m_begin(0),
+        m_end(0),
+        m_granularity(0),
+        m_granularity_mask(0) {}
+
+  /** \brief  Total range */
+  inline RangePolicy(const typename traits::execution_space& work_space,
+                     const member_type work_begin, const member_type work_end)
+      : m_space(work_space),
+        m_begin(work_begin < work_end ? work_begin : 0),
+        m_end(work_begin < work_end ? work_end : 0),
+        m_granularity(0),
+        m_granularity_mask(0) {
+    set_auto_chunk_size();
+  }
+
+  /** \brief  Total range */
+  inline RangePolicy(const member_type work_begin, const member_type work_end)
+      : RangePolicy(typename traits::execution_space(), work_begin, work_end) {
+    set_auto_chunk_size();
+  }
+
+  /** \brief  Total range */
+  template <class... Args>
+  inline RangePolicy(const typename traits::execution_space& work_space,
+                     const member_type work_begin, const member_type work_end,
+                     Args... args)
+      : m_space(work_space),
+        m_begin(work_begin < work_end ? work_begin : 0),
+        m_end(work_begin < work_end ? work_end : 0),
+        m_granularity(0),
+        m_granularity_mask(0) {
+    set_auto_chunk_size();
+    set(args...);
+  }
+
+  /** \brief  Total range */
+  template <class... Args>
+  inline RangePolicy(const member_type work_begin, const member_type work_end,
+                     Args... args)
+      : RangePolicy(typename traits::execution_space(), work_begin, work_end) {
+    set_auto_chunk_size();
+    set(args...);
+  }
+
+ private:
+  inline void set() {}
+
+ public:
+  template <class... Args>
+  inline void set(Args...) {
+    static_assert(
+        0 == sizeof...(Args),
+        "Kokkos::RangePolicy: unhandled constructor arguments encountered.");
+  }
+
+  template <class... Args>
+  inline void set(const ChunkSize& chunksize, Args... args) {
+    m_granularity      = chunksize.value;
+    m_granularity_mask = m_granularity - 1;
+    set(args...);
+  }
+
+ public:
+  /** \brief return chunk_size */
+  inline member_type chunk_size() const { return m_granularity; }
+
+  /** \brief set chunk_size to a discrete value*/
+  inline RangePolicy& set_chunk_size(int chunk_size) {
+    m_granularity      = chunk_size;
+    m_granularity_mask = m_granularity - 1;
+    return *this;
+  }
+
+ private:
+  /** \brief finalize chunk_size if it was set to AUTO*/
+  inline void set_auto_chunk_size() {
+    int64_t concurrency =
+        static_cast<int64_t>(traits::execution_space::concurrency());
+    if (concurrency == 0) concurrency = 1;
+
+    if (m_granularity > 0) {
+      if (!Impl::is_integral_power_of_two(m_granularity))
+        Kokkos::abort("RangePolicy blocking granularity must be power of two");
+    }
+
+    int64_t new_chunk_size = 1;
+    while (new_chunk_size * 100 * concurrency <
+           static_cast<int64_t>(m_end - m_begin))
+      new_chunk_size *= 2;
+    if (new_chunk_size < 128) {
+      new_chunk_size = 1;
+      while ((new_chunk_size * 40 * concurrency <
+              static_cast<int64_t>(m_end - m_begin)) &&
+             (new_chunk_size < 128))
+        new_chunk_size *= 2;
+    }
+    m_granularity      = new_chunk_size;
+    m_granularity_mask = m_granularity - 1;
+  }
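+
+  // Illustrative arithmetic: with concurrency == 8 and a range of 100000
+  // iterations, the first loop doubles new_chunk_size while
+  // new_chunk_size * 100 * 8 < 100000, stopping at 128; since the result is
+  // not smaller than 128, the fallback loop is skipped and m_granularity
+  // becomes 128.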
+
+ public:
+  /** \brief  Subrange for a partition's rank and size.
+   *
+   *  Typically used to partition a range over a group of threads.
+   */
+  struct WorkRange {
+    using work_tag    = typename RangePolicy<Properties...>::work_tag;
+    using member_type = typename RangePolicy<Properties...>::member_type;
+
+    KOKKOS_INLINE_FUNCTION member_type begin() const { return m_begin; }
+    KOKKOS_INLINE_FUNCTION member_type end() const { return m_end; }
+
+    /** \brief  Subrange for a partition's rank and size.
+     *
+     *  Typically used to partition a range over a group of threads.
+     */
+    KOKKOS_INLINE_FUNCTION
+    WorkRange(const RangePolicy& range, const int part_rank,
+              const int part_size)
+        : m_begin(0), m_end(0) {
+      if (part_size) {
+        // Split evenly among partitions, then round up to the granularity.
+        const member_type work_part =
+            ((((range.end() - range.begin()) + (part_size - 1)) / part_size) +
+             range.m_granularity_mask) &
+            ~member_type(range.m_granularity_mask);
+
+        m_begin = range.begin() + work_part * part_rank;
+        m_end   = m_begin + work_part;
+
+        if (range.end() < m_begin) m_begin = range.end();
+        if (range.end() < m_end) m_end = range.end();
+      }
+    }
+
+   private:
+    member_type m_begin;
+    member_type m_end;
+    WorkRange();
+    WorkRange& operator=(const WorkRange&);
+  };
+};
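+
+// Usage sketch (illustrative; assumes an initialized Kokkos runtime and
+// views x, y and a scalar a defined elsewhere; the chunk size must be a
+// power of two):
+//
+//   Kokkos::RangePolicy<> policy(0, n, Kokkos::ChunkSize(128));
+//   Kokkos::parallel_for("axpy", policy,
+//                        KOKKOS_LAMBDA(const int i) { y(i) += a * x(i); });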
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <class ExecSpace, class... Properties>
+class TeamPolicyInternal : public Impl::PolicyTraits<Properties...> {
+ private:
+  using traits = Impl::PolicyTraits<Properties...>;
+
+ public:
+  using index_type = typename traits::index_type;
+
+  //----------------------------------------
+  /** \brief  Query maximum team size for a given functor.
+   *
+   *  This size takes into account execution space concurrency limitations and
+   *  scratch memory space limitations for reductions, team reduce/scan, and
+   *  team shared memory.
+   *
+   *  This function only works for single-operator functors.
+   *  With multi-operator functors it cannot be determined
+   *  which operator will be called.
+   */
+  template <class FunctorType>
+  static int team_size_max(const FunctorType&);
+
+  /** \brief  Query recommended team size for a given functor.
+   *
+   *  This size takes into account execution space concurrency limitations and
+   *  scratch memory space limitations for reductions, team reduce/scan, and
+   *  team shared memory.
+   *
+   *  This function only works for single-operator functors.
+   *  With multi-operator functors it cannot be determined
+   *  which operator will be called.
+   */
+  template <class FunctorType>
+  static int team_size_recommended(const FunctorType&);
+
+  template <class FunctorType>
+  static int team_size_recommended(const FunctorType&, const int&);
+
+  template <class FunctorType>
+  int team_size_recommended(const FunctorType& functor,
+                            const int vector_length);
+
+  //----------------------------------------
+  /** \brief  Construct policy with the given instance of the execution space */
+  TeamPolicyInternal(const typename traits::execution_space&,
+                     int league_size_request, int team_size_request,
+                     int vector_length_request = 1);
+
+  TeamPolicyInternal(const typename traits::execution_space&,
+                     int league_size_request, const Kokkos::AUTO_t&,
+                     int vector_length_request = 1);
+
+  /** \brief  Construct policy with the default instance of the execution space
+   */
+  TeamPolicyInternal(int league_size_request, int team_size_request,
+                     int vector_length_request = 1);
+
+  TeamPolicyInternal(int league_size_request, const Kokkos::AUTO_t&,
+                     int vector_length_request = 1);
+
+  /*  TeamPolicyInternal( int league_size_request , int team_size_request );
+
+    TeamPolicyInternal( int league_size_request , const Kokkos::AUTO_t & );*/
+
+  /** \brief  The actual league size (number of teams) of the policy.
+   *
+   *  This may be smaller than the requested league size due to limitations
+   *  of the execution space.
+   */
+  KOKKOS_INLINE_FUNCTION int league_size() const;
+
+  /** \brief  The actual team size (number of threads per team) of the policy.
+   *
+   *  This may be smaller than the requested team size due to limitations
+   *  of the execution space.
+   */
+  KOKKOS_INLINE_FUNCTION int team_size() const;
+
+  /** \brief Whether the policy has an automatically determined team size
+   */
+  inline bool impl_auto_team_size() const;
+  /** \brief Whether the policy has an automatically determined vector length
+   */
+  inline bool impl_auto_vector_length() const;
+
+  static int vector_length_max();
+
+  KOKKOS_INLINE_FUNCTION int impl_vector_length() const;
+
+  inline typename traits::index_type chunk_size() const;
+
+  inline TeamPolicyInternal& set_chunk_size(int chunk_size);
+
+  /** \brief  Parallel execution of a functor calls the functor once with
+   *          each member of the execution policy.
+   */
+  struct member_type {
+    /** \brief  Handle to the currently executing team shared scratch memory */
+    KOKKOS_INLINE_FUNCTION
+    typename traits::execution_space::scratch_memory_space team_shmem() const;
+
+    /** \brief  Rank of this team within the league of teams */
+    KOKKOS_INLINE_FUNCTION int league_rank() const;
+
+    /** \brief  Number of teams in the league */
+    KOKKOS_INLINE_FUNCTION int league_size() const;
+
+    /** \brief  Rank of this thread within this team */
+    KOKKOS_INLINE_FUNCTION int team_rank() const;
+
+    /** \brief  Number of threads in this team */
+    KOKKOS_INLINE_FUNCTION int team_size() const;
+
+    /** \brief  Barrier among the threads of this team */
+    KOKKOS_INLINE_FUNCTION void team_barrier() const;
+
+    /** \brief  Intra-team reduction. Returns join of all values of the team
+     * members. */
+    template <class JoinOp>
+    KOKKOS_INLINE_FUNCTION typename JoinOp::value_type team_reduce(
+        const typename JoinOp::value_type, const JoinOp&) const;
+
+    /** \brief  Intra-team exclusive prefix sum with team_rank() ordering.
+     *
+     *  The highest rank thread can compute the reduction total as
+     *    reduction_total = dev.team_scan( value ) + value ;
+     */
+    template <typename Type>
+    KOKKOS_INLINE_FUNCTION Type team_scan(const Type& value) const;
+
+    /** \brief  Intra-team exclusive prefix sum with team_rank() ordering
+     *          with intra-team non-deterministic ordering accumulation.
+     *
+     *  The global inter-team accumulation value will, at the end of the
+     *  league's parallel execution, be the scan's total.
+     *  Parallel execution ordering of the league's teams is non-deterministic.
+     *  As such the base value for each team's scan operation is similarly
+     *  non-deterministic.
+     */
+    template <typename Type>
+    KOKKOS_INLINE_FUNCTION Type team_scan(const Type& value,
+                                          Type* const global_accum) const;
+  };
+};
+
+struct PerTeamValue {
+  size_t value;
+  PerTeamValue(size_t arg);
+};
+
+struct PerThreadValue {
+  size_t value;
+  PerThreadValue(size_t arg);
+};
+
+template <class iType, class... Args>
+struct ExtractVectorLength {
+  static inline iType value(
+      std::enable_if_t<std::is_integral<iType>::value, iType> val, Args...) {
+    return val;
+  }
+  static inline std::enable_if_t<!std::is_integral<iType>::value, int> value(
+      std::enable_if_t<!std::is_integral<iType>::value, iType>, Args...) {
+    return 1;
+  }
+};
+
+template <class iType, class... Args>
+inline std::enable_if_t<std::is_integral<iType>::value, iType>
+extract_vector_length(iType val, Args...) {
+  return val;
+}
+
+template <class iType, class... Args>
+inline std::enable_if_t<!std::is_integral<iType>::value, int>
+extract_vector_length(iType, Args...) {
+  return 1;
+}
+
+}  // namespace Impl
+
+Impl::PerTeamValue PerTeam(const size_t& arg);
+Impl::PerThreadValue PerThread(const size_t& arg);
+
+struct ScratchRequest {
+  int level;
+
+  size_t per_team;
+  size_t per_thread;
+
+  inline ScratchRequest(const int& level_,
+                        const Impl::PerTeamValue& team_value) {
+    level      = level_;
+    per_team   = team_value.value;
+    per_thread = 0;
+  }
+
+  inline ScratchRequest(const int& level_,
+                        const Impl::PerThreadValue& thread_value) {
+    level      = level_;
+    per_team   = 0;
+    per_thread = thread_value.value;
+  }
+
+  inline ScratchRequest(const int& level_, const Impl::PerTeamValue& team_value,
+                        const Impl::PerThreadValue& thread_value) {
+    level      = level_;
+    per_team   = team_value.value;
+    per_thread = thread_value.value;
+  }
+
+  inline ScratchRequest(const int& level_,
+                        const Impl::PerThreadValue& thread_value,
+                        const Impl::PerTeamValue& team_value) {
+    level      = level_;
+    per_team   = team_value.value;
+    per_thread = thread_value.value;
+  }
+};
+
+// Throws a runtime exception if level is not `0` or `1`
+void team_policy_check_valid_storage_level_argument(int level);
+
+/** \brief  Execution policy for parallel work over a league of teams of
+ * threads.
+ *
+ *  The work functor is called for each thread of each team such that
+ *  the team's member threads are guaranteed to be concurrent.
+ *
+ *  The team's threads have access to team shared scratch memory and
+ *  team collective operations.
+ *
+ *  If the WorkTag is non-void then the first calling argument of the
+ *  work functor's parentheses operator is 'const WorkTag &'.
+ *  This allows a functor to have multiple work member functions.
+ *
+ *  The order of template arguments does not matter, since the
+ *  implementation uses variadic templates; any of the arguments can be
+ *  omitted.
+ *
+ *  Possible template arguments and their default values:
+ *    - ExecutionSpace (DefaultExecutionSpace): where to execute code;
+ *      must be enabled.
+ *    - WorkTag (none): tag used as the first argument of the functor's
+ *      operator().
+ *    - Schedule<Type> (Schedule<Static>): scheduling policy (Dynamic or
+ *      Static).
+ *    - IndexType<Type> (IndexType<ExecutionSpace::size_type>): integer index
+ *      type used to iterate over the index space.
+ *    - LaunchBounds<unsigned,unsigned> (LaunchBounds<0,0>): launch bounds
+ *      for CUDA compilation; the default indicates that no launch bounds are
+ *      specified.
+ */
+template <class... Properties>
+class TeamPolicy
+    : public Impl::TeamPolicyInternal<
+          typename Impl::PolicyTraits<Properties...>::execution_space,
+          Properties...> {
+  using internal_policy = Impl::TeamPolicyInternal<
+      typename Impl::PolicyTraits<Properties...>::execution_space,
+      Properties...>;
+
+  template <class... OtherProperties>
+  friend class TeamPolicy;
+
+ public:
+  using traits = Impl::PolicyTraits<Properties...>;
+
+  using execution_policy = TeamPolicy<Properties...>;
+
+  TeamPolicy() : internal_policy(0, AUTO) {}
+
+  /** \brief  Construct policy with the given instance of the execution space */
+  TeamPolicy(const typename traits::execution_space& space_,
+             int league_size_request, int team_size_request,
+             int vector_length_request = 1)
+      : internal_policy(space_, league_size_request, team_size_request,
+                        vector_length_request) {}
+
+  TeamPolicy(const typename traits::execution_space& space_,
+             int league_size_request, const Kokkos::AUTO_t&,
+             int vector_length_request = 1)
+      : internal_policy(space_, league_size_request, Kokkos::AUTO(),
+                        vector_length_request) {}
+
+  TeamPolicy(const typename traits::execution_space& space_,
+             int league_size_request, const Kokkos::AUTO_t&,
+             const Kokkos::AUTO_t&)
+      : internal_policy(space_, league_size_request, Kokkos::AUTO(),
+                        Kokkos::AUTO()) {}
+  TeamPolicy(const typename traits::execution_space& space_,
+             int league_size_request, const int team_size_request,
+             const Kokkos::AUTO_t&)
+      : internal_policy(space_, league_size_request, team_size_request,
+                        Kokkos::AUTO()) {}
+  /** \brief  Construct policy with the default instance of the execution space
+   */
+  TeamPolicy(int league_size_request, int team_size_request,
+             int vector_length_request = 1)
+      : internal_policy(league_size_request, team_size_request,
+                        vector_length_request) {}
+
+  TeamPolicy(int league_size_request, const Kokkos::AUTO_t&,
+             int vector_length_request = 1)
+      : internal_policy(league_size_request, Kokkos::AUTO(),
+                        vector_length_request) {}
+
+  TeamPolicy(int league_size_request, const Kokkos::AUTO_t&,
+             const Kokkos::AUTO_t&)
+      : internal_policy(league_size_request, Kokkos::AUTO(), Kokkos::AUTO()) {}
+  TeamPolicy(int league_size_request, const int team_size_request,
+             const Kokkos::AUTO_t&)
+      : internal_policy(league_size_request, team_size_request,
+                        Kokkos::AUTO()) {}
+
+  template <class... OtherProperties>
+  TeamPolicy(const TeamPolicy<OtherProperties...> p) : internal_policy(p) {
+    // Cannot call converting constructor in the member initializer list because
+    // it is not a direct base.
+    internal_policy::traits::operator=(p);
+  }
+
+ private:
+  TeamPolicy(const internal_policy& p) : internal_policy(p) {}
+
+ public:
+  inline TeamPolicy& set_chunk_size(int chunk) {
+    static_assert(std::is_same<decltype(internal_policy::set_chunk_size(chunk)),
+                               internal_policy&>::value,
+                  "internal set_chunk_size should return a reference");
+    return static_cast<TeamPolicy&>(internal_policy::set_chunk_size(chunk));
+  }
+
+  inline TeamPolicy& set_scratch_size(const int& level,
+                                      const Impl::PerTeamValue& per_team) {
+    static_assert(std::is_same<decltype(internal_policy::set_scratch_size(
+                                   level, per_team)),
+                               internal_policy&>::value,
+                  "internal set_scratch_size should return a reference");
+
+    team_policy_check_valid_storage_level_argument(level);
+    return static_cast<TeamPolicy&>(
+        internal_policy::set_scratch_size(level, per_team));
+  }
+  inline TeamPolicy& set_scratch_size(const int& level,
+                                      const Impl::PerThreadValue& per_thread) {
+    team_policy_check_valid_storage_level_argument(level);
+    return static_cast<TeamPolicy&>(
+        internal_policy::set_scratch_size(level, per_thread));
+  }
+  inline TeamPolicy& set_scratch_size(const int& level,
+                                      const Impl::PerTeamValue& per_team,
+                                      const Impl::PerThreadValue& per_thread) {
+    team_policy_check_valid_storage_level_argument(level);
+    return static_cast<TeamPolicy&>(
+        internal_policy::set_scratch_size(level, per_team, per_thread));
+  }
+  inline TeamPolicy& set_scratch_size(const int& level,
+                                      const Impl::PerThreadValue& per_thread,
+                                      const Impl::PerTeamValue& per_team) {
+    team_policy_check_valid_storage_level_argument(level);
+    return static_cast<TeamPolicy&>(
+        internal_policy::set_scratch_size(level, per_team, per_thread));
+  }
+};
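+
+// Usage sketch (illustrative; assumes an initialized Kokkos runtime and a
+// league_size defined elsewhere):
+//
+//   using policy_type = Kokkos::TeamPolicy<>;
+//   policy_type policy(league_size, Kokkos::AUTO);
+//   policy.set_scratch_size(0, Kokkos::PerTeam(1024), Kokkos::PerThread(64));
+//   Kokkos::parallel_for(
+//       policy, KOKKOS_LAMBDA(const policy_type::member_type& team) {
+//         const int i = team.league_rank();
+//         /* team-collective work for league member i */
+//       });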
+
+namespace Impl {
+
+template <typename iType, class TeamMemberType>
+struct TeamThreadRangeBoundariesStruct {
+ private:
+  KOKKOS_INLINE_FUNCTION static iType ibegin(const iType& arg_begin,
+                                             const iType& arg_end,
+                                             const iType& arg_rank,
+                                             const iType& arg_size) {
+    return arg_begin +
+           ((arg_end - arg_begin + arg_size - 1) / arg_size) * arg_rank;
+  }
+
+  KOKKOS_INLINE_FUNCTION static iType iend(const iType& arg_begin,
+                                           const iType& arg_end,
+                                           const iType& arg_rank,
+                                           const iType& arg_size) {
+    const iType end_ =
+        arg_begin +
+        ((arg_end - arg_begin + arg_size - 1) / arg_size) * (arg_rank + 1);
+    return end_ < arg_end ? end_ : arg_end;
+  }
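+
+  // For example: with arg_begin = 0, arg_end = 10, and arg_size = 4 threads,
+  // the chunk is (10 + 4 - 1) / 4 = 3, so rank 0 covers [0,3), rank 1 [3,6),
+  // rank 2 [6,9), and rank 3 the remainder [9,10).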
+
+ public:
+  using index_type = iType;
+  const iType start;
+  const iType end;
+  enum { increment = 1 };
+  const TeamMemberType& thread;
+
+  KOKKOS_INLINE_FUNCTION
+  TeamThreadRangeBoundariesStruct(const TeamMemberType& arg_thread,
+                                  const iType& arg_end)
+      : start(
+            ibegin(0, arg_end, arg_thread.team_rank(), arg_thread.team_size())),
+        end(iend(0, arg_end, arg_thread.team_rank(), arg_thread.team_size())),
+        thread(arg_thread) {}
+
+  KOKKOS_INLINE_FUNCTION
+  TeamThreadRangeBoundariesStruct(const TeamMemberType& arg_thread,
+                                  const iType& arg_begin, const iType& arg_end)
+      : start(ibegin(arg_begin, arg_end, arg_thread.team_rank(),
+                     arg_thread.team_size())),
+        end(iend(arg_begin, arg_end, arg_thread.team_rank(),
+                 arg_thread.team_size())),
+        thread(arg_thread) {}
+};
+
+template <typename iType, class TeamMemberType>
+struct TeamVectorRangeBoundariesStruct {
+ private:
+  KOKKOS_INLINE_FUNCTION static iType ibegin(const iType& arg_begin,
+                                             const iType& arg_end,
+                                             const iType& arg_rank,
+                                             const iType& arg_size) {
+    return arg_begin +
+           ((arg_end - arg_begin + arg_size - 1) / arg_size) * arg_rank;
+  }
+
+  KOKKOS_INLINE_FUNCTION static iType iend(const iType& arg_begin,
+                                           const iType& arg_end,
+                                           const iType& arg_rank,
+                                           const iType& arg_size) {
+    const iType end_ =
+        arg_begin +
+        ((arg_end - arg_begin + arg_size - 1) / arg_size) * (arg_rank + 1);
+    return end_ < arg_end ? end_ : arg_end;
+  }
+
+ public:
+  using index_type = iType;
+  const iType start;
+  const iType end;
+  enum { increment = 1 };
+  const TeamMemberType& thread;
+
+  KOKKOS_INLINE_FUNCTION
+  TeamVectorRangeBoundariesStruct(const TeamMemberType& arg_thread,
+                                  const iType& arg_end)
+      : start(
+            ibegin(0, arg_end, arg_thread.team_rank(), arg_thread.team_size())),
+        end(iend(0, arg_end, arg_thread.team_rank(), arg_thread.team_size())),
+        thread(arg_thread) {}
+
+  KOKKOS_INLINE_FUNCTION
+  TeamVectorRangeBoundariesStruct(const TeamMemberType& arg_thread,
+                                  const iType& arg_begin, const iType& arg_end)
+      : start(ibegin(arg_begin, arg_end, arg_thread.team_rank(),
+                     arg_thread.team_size())),
+        end(iend(arg_begin, arg_end, arg_thread.team_rank(),
+                 arg_thread.team_size())),
+        thread(arg_thread) {}
+};
+
+template <typename iType, class TeamMemberType>
+struct ThreadVectorRangeBoundariesStruct {
+  using index_type = iType;
+  const index_type start;
+  const index_type end;
+  enum { increment = 1 };
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ThreadVectorRangeBoundariesStruct(const TeamMemberType,
+                                              const index_type& count) noexcept
+      : start(static_cast<index_type>(0)), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ThreadVectorRangeBoundariesStruct(const index_type& count) noexcept
+      : start(static_cast<index_type>(0)), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ThreadVectorRangeBoundariesStruct(
+      const TeamMemberType, const index_type& arg_begin,
+      const index_type& arg_end) noexcept
+      : start(static_cast<index_type>(arg_begin)), end(arg_end) {}
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ThreadVectorRangeBoundariesStruct(
+      const index_type& arg_begin, const index_type& arg_end) noexcept
+      : start(static_cast<index_type>(arg_begin)), end(arg_end) {}
+};
+
+template <class TeamMemberType>
+struct ThreadSingleStruct {
+  const TeamMemberType& team_member;
+  KOKKOS_INLINE_FUNCTION
+  ThreadSingleStruct(const TeamMemberType& team_member_)
+      : team_member(team_member_) {}
+};
+
+template <class TeamMemberType>
+struct VectorSingleStruct {
+  const TeamMemberType& team_member;
+  KOKKOS_INLINE_FUNCTION
+  VectorSingleStruct(const TeamMemberType& team_member_)
+      : team_member(team_member_) {}
+};
+
+}  // namespace Impl
+
+/** \brief  Execution policy for parallel work over the threads within a team.
+ *
+ *  The range is split over all threads in a team. The mapping scheme depends
+ * on the architecture. This policy is used together with a parallel pattern as
+ * a nested layer within a kernel launched with the TeamPolicy. This variant
+ * expects a single count, so the iteration range is [0,count).
+ */
+template <typename iType, class TeamMemberType, class _never_use_this_overload>
+KOKKOS_INLINE_FUNCTION_DELETED
+    Impl::TeamThreadRangeBoundariesStruct<iType, TeamMemberType>
+    TeamThreadRange(const TeamMemberType&, const iType& count) = delete;
+
+/** \brief  Execution policy for parallel work over the threads within a team.
+ *
+ *  The range is split over all threads in a team. The mapping scheme depends
+ * on the architecture. This policy is used together with a parallel pattern as
+ * a nested layer within a kernel launched with the TeamPolicy. This variant
+ * expects a begin and an end, so the iteration range is [begin,end).
+ */
+template <typename iType1, typename iType2, class TeamMemberType,
+          class _never_use_this_overload>
+KOKKOS_INLINE_FUNCTION_DELETED Impl::TeamThreadRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, TeamMemberType>
+TeamThreadRange(const TeamMemberType&, const iType1& begin,
+                const iType2& end) = delete;
+
+/** \brief  Execution policy for parallel work over the threads and vector
+ * lanes within a team.
+ *
+ *  The range is split over all threads and vector lanes in a team. The mapping
+ * scheme depends on the architecture. This policy is used together with a
+ * parallel pattern as a nested layer within a kernel launched with the
+ * TeamPolicy. This variant expects a single count, so the iteration range is
+ * [0,count).
+ */
+template <typename iType, class TeamMemberType, class _never_use_this_overload>
+KOKKOS_INLINE_FUNCTION_DELETED
+    Impl::TeamThreadRangeBoundariesStruct<iType, TeamMemberType>
+    TeamVectorRange(const TeamMemberType&, const iType& count) = delete;
+
+/** \brief  Execution policy for parallel work over the threads and vector
+ * lanes within a team.
+ *
+ *  The range is split over all threads and vector lanes in a team. The mapping
+ * scheme depends on the architecture. This policy is used together with a
+ * parallel pattern as a nested layer within a kernel launched with the
+ * TeamPolicy. This variant expects a begin and an end, so the iteration range
+ * is [begin,end).
+ */
+template <typename iType1, typename iType2, class TeamMemberType,
+          class _never_use_this_overload>
+KOKKOS_INLINE_FUNCTION_DELETED Impl::TeamThreadRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, TeamMemberType>
+TeamVectorRange(const TeamMemberType&, const iType1& begin,
+                const iType2& end) = delete;
+
+/** \brief  Execution policy for a vector parallel loop.
+ *
+ *  The range is split over all vector lanes in a thread. The mapping scheme
+ * depends on the architecture. This policy is used together with a parallel
+ * pattern as a nested layer within a kernel launched with the TeamPolicy. This
+ * variant expects a single count, so the iteration range is [0,count).
+ */
+template <typename iType, class TeamMemberType, class _never_use_this_overload>
+KOKKOS_INLINE_FUNCTION_DELETED
+    Impl::ThreadVectorRangeBoundariesStruct<iType, TeamMemberType>
+    ThreadVectorRange(const TeamMemberType&, const iType& count) = delete;
+
+template <typename iType1, typename iType2, class TeamMemberType,
+          class _never_use_this_overload>
+KOKKOS_INLINE_FUNCTION_DELETED Impl::ThreadVectorRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, TeamMemberType>
+ThreadVectorRange(const TeamMemberType&, const iType1& arg_begin,
+                  const iType2& arg_end) = delete;
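+
+// A minimal sketch of how these nested range policies are used inside a team
+// kernel once a backend supplies the real (non-deleted) overloads
+// (illustrative only; the bounds 64, 32, and 8 are arbitrary example values):
+//
+//   using member_type = Kokkos::TeamPolicy<>::member_type;
+//   Kokkos::parallel_for(
+//       Kokkos::TeamPolicy<>(64, Kokkos::AUTO()),
+//       KOKKOS_LAMBDA(const member_type& team) {
+//         Kokkos::parallel_for(
+//             Kokkos::TeamThreadRange(team, 32), [&](const int i) {
+//               Kokkos::parallel_for(Kokkos::ThreadVectorRange(team, 8),
+//                                    [&](const int j) { /* work on (i,j) */ });
+//             });
+//       });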
+
+namespace Impl {
+
+template <typename FunctorType, typename TagType,
+          bool HasTag = !std::is_void<TagType>::value>
+struct ParallelConstructName;
+
+template <typename FunctorType, typename TagType>
+struct ParallelConstructName<FunctorType, TagType, true> {
+  ParallelConstructName(std::string const& label) : label_ref(label) {
+    if (label.empty()) {
+      default_name = std::string(typeid(FunctorType).name()) + "/" +
+                     typeid(TagType).name();
+    }
+  }
+  std::string const& get() {
+    return (label_ref.empty()) ? default_name : label_ref;
+  }
+  std::string const& label_ref;
+  std::string default_name;
+};
+
+template <typename FunctorType, typename TagType>
+struct ParallelConstructName<FunctorType, TagType, false> {
+  ParallelConstructName(std::string const& label) : label_ref(label) {
+    if (label.empty()) {
+      default_name = std::string(typeid(FunctorType).name());
+    }
+  }
+  std::string const& get() {
+    return (label_ref.empty()) ? default_name : label_ref;
+  }
+  std::string const& label_ref;
+  std::string default_name;
+};
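+
+// Sketch of the labeling fallback above (illustrative; MyFunctor and MyTag
+// are hypothetical user types):
+//
+//   ParallelConstructName<MyFunctor, MyTag> unnamed("");
+//   unnamed.get();  // "<typeid(MyFunctor).name()>/<typeid(MyTag).name()>"
+//   ParallelConstructName<MyFunctor, MyTag> named("my label");
+//   named.get();    // "my label"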
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <class PatternTag, class... Args>
+struct PatternImplSpecializationFromTag;
+
+template <class... Args>
+struct PatternImplSpecializationFromTag<Kokkos::ParallelForTag, Args...>
+    : identity<ParallelFor<Args...>> {};
+
+template <class... Args>
+struct PatternImplSpecializationFromTag<Kokkos::ParallelReduceTag, Args...>
+    : identity<ParallelReduce<Args...>> {};
+
+template <class... Args>
+struct PatternImplSpecializationFromTag<Kokkos::ParallelScanTag, Args...>
+    : identity<ParallelScan<Args...>> {};
+
+template <class PatternImpl>
+struct PatternTagFromImplSpecialization;
+
+template <class... Args>
+struct PatternTagFromImplSpecialization<ParallelFor<Args...>>
+    : identity<ParallelForTag> {};
+
+template <class... Args>
+struct PatternTagFromImplSpecialization<ParallelReduce<Args...>>
+    : identity<ParallelReduceTag> {};
+
+template <class... Args>
+struct PatternTagFromImplSpecialization<ParallelScan<Args...>>
+    : identity<ParallelScanTag> {};
+
+}  // end namespace Impl
+
+}  // namespace Kokkos
+#endif /* #define KOKKOS_EXECPOLICY_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Extents.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Extents.hpp
new file mode 100644 (file)
index 0000000..c51d663
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//              Copyright (2019) Sandia Corporation
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_KOKKOS_EXTENTS_HPP
+#define KOKKOS_KOKKOS_EXTENTS_HPP
+
+#include <cstddef>
+#include <type_traits>
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+constexpr ptrdiff_t dynamic_extent = -1;
+
+template <ptrdiff_t... ExtentSpecs>
+struct Extents {
+  /* TODO @enhancement flesh this out more */
+};
+
+template <class Exts, ptrdiff_t NewExtent>
+struct PrependExtent;
+
+template <ptrdiff_t... Exts, ptrdiff_t NewExtent>
+struct PrependExtent<Extents<Exts...>, NewExtent> {
+  using type = Extents<NewExtent, Exts...>;
+};
+
+template <class Exts, ptrdiff_t NewExtent>
+struct AppendExtent;
+
+template <ptrdiff_t... Exts, ptrdiff_t NewExtent>
+struct AppendExtent<Extents<Exts...>, NewExtent> {
+  using type = Extents<Exts..., NewExtent>;
+};
+
+}  // end namespace Experimental
+
+namespace Impl {
+
+namespace _parse_view_extents_impl {
+
+template <class T>
+struct _all_remaining_extents_dynamic : std::true_type {};
+
+template <class T>
+struct _all_remaining_extents_dynamic<T*> : _all_remaining_extents_dynamic<T> {
+};
+
+template <class T, unsigned N>
+struct _all_remaining_extents_dynamic<T[N]> : std::false_type {};
+
+template <class T, class Result, class = void>
+struct _parse_impl {
+  using type = Result;
+};
+
+// We have to treat the case of int**[x] specially, since it *doesn't* go
+// backwards
+template <class T, ptrdiff_t... ExtentSpec>
+struct _parse_impl<T*, Kokkos::Experimental::Extents<ExtentSpec...>,
+                   std::enable_if_t<_all_remaining_extents_dynamic<T>::value>>
+    : _parse_impl<T, Kokkos::Experimental::Extents<
+                         Kokkos::Experimental::dynamic_extent, ExtentSpec...>> {
+};
+
+// int*(*[x])[y] should still work also (meaning int[][x][][y])
+template <class T, ptrdiff_t... ExtentSpec>
+struct _parse_impl<
+    T*, Kokkos::Experimental::Extents<ExtentSpec...>,
+    std::enable_if_t<!_all_remaining_extents_dynamic<T>::value>> {
+  using _next = Kokkos::Experimental::AppendExtent<
+      typename _parse_impl<T, Kokkos::Experimental::Extents<ExtentSpec...>,
+                           void>::type,
+      Kokkos::Experimental::dynamic_extent>;
+  using type = typename _next::type;
+};
+
+template <class T, ptrdiff_t... ExtentSpec, unsigned N>
+struct _parse_impl<T[N], Kokkos::Experimental::Extents<ExtentSpec...>, void>
+    : _parse_impl<
+          T, Kokkos::Experimental::Extents<ExtentSpec...,
+                                           ptrdiff_t(N)>  // TODO @pedantic this
+                                                          // could be a
+                                                          // narrowing cast
+          > {};
+
+}  // end namespace _parse_view_extents_impl
+
+template <class DataType>
+struct ParseViewExtents {
+  using type = typename _parse_view_extents_impl ::_parse_impl<
+      DataType, Kokkos::Experimental::Extents<>>::type;
+};
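+
+// Illustrative example of the parsing above: a View data type with one
+// runtime and one compile-time dimension maps to the corresponding Extents.
+//
+//   static_assert(
+//       std::is_same<ParseViewExtents<double* [3]>::type,
+//                    Kokkos::Experimental::Extents<
+//                        Kokkos::Experimental::dynamic_extent, 3>>::value,
+//       "double*[3] parses to Extents<dynamic_extent, 3>");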
+
+template <class ValueType, ptrdiff_t Ext>
+struct ApplyExtent {
+  using type = ValueType[Ext];
+};
+
+template <class ValueType>
+struct ApplyExtent<ValueType, Kokkos::Experimental::dynamic_extent> {
+  using type = ValueType*;
+};
+
+template <class ValueType, unsigned N, ptrdiff_t Ext>
+struct ApplyExtent<ValueType[N], Ext> {
+  using type = typename ApplyExtent<ValueType, Ext>::type[N];
+};
+
+template <class ValueType, ptrdiff_t Ext>
+struct ApplyExtent<ValueType*, Ext> {
+  using type = ValueType * [Ext];
+};
+
+template <class ValueType>
+struct ApplyExtent<ValueType*, Kokkos::Experimental::dynamic_extent> {
+  using type =
+      typename ApplyExtent<ValueType,
+                           Kokkos::Experimental::dynamic_extent>::type*;
+};
+
+template <class ValueType, unsigned N>
+struct ApplyExtent<ValueType[N], Kokkos::Experimental::dynamic_extent> {
+  using type =
+      typename ApplyExtent<ValueType,
+                           Kokkos::Experimental::dynamic_extent>::type[N];
+};
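+
+// Illustrative results of ApplyExtent, derived from the specializations above
+// (here dynamic_extent is Kokkos::Experimental::dynamic_extent):
+//
+//   ApplyExtent<double, 3>::type                 // -> double[3]
+//   ApplyExtent<double, dynamic_extent>::type    // -> double*
+//   ApplyExtent<double*, dynamic_extent>::type   // -> double**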
+
+}  // end namespace Impl
+
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_EXTENTS_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Future.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Future.hpp
new file mode 100644 (file)
index 0000000..4da6c2b
--- /dev/null
@@ -0,0 +1,507 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_FUTURE_HPP
+#define KOKKOS_FUTURE_HPP
+
+//----------------------------------------------------------------------------
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_TaskScheduler_fwd.hpp>
+//----------------------------------------------------------------------------
+
+#include <impl/Kokkos_TaskQueue.hpp>
+#include <impl/Kokkos_TaskResult.hpp>
+#include <impl/Kokkos_TaskBase.hpp>
+#include <Kokkos_Atomic.hpp>
+
+#include <Kokkos_Concepts.hpp>  // is_space
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+// For now, hack this in as a partial specialization
+// TODO @tasking @cleanup Make this the "normal" class template and make the old
+// code the specialization
+template <typename ValueType, typename ExecutionSpace, typename QueueType>
+class BasicFuture<ValueType, SimpleTaskScheduler<ExecutionSpace, QueueType>> {
+ public:
+  using value_type      = ValueType;
+  using execution_space = ExecutionSpace;
+  using scheduler_type  = SimpleTaskScheduler<ExecutionSpace, QueueType>;
+  using queue_type      = typename scheduler_type::task_queue_type;
+
+ private:
+  template <class, class>
+  friend class SimpleTaskScheduler;
+  template <class, class>
+  friend class BasicFuture;
+
+  using task_base_type  = typename scheduler_type::task_base_type;
+  using task_queue_type = typename scheduler_type::task_queue_type;
+
+  using task_queue_traits = typename scheduler_type::task_queue_traits;
+  using task_scheduling_info_type =
+      typename scheduler_type::task_scheduling_info_type;
+
+  using result_storage_type = Impl::TaskResultStorage<
+      ValueType,
+      Impl::SchedulingInfoStorage<Impl::RunnableTaskBase<task_queue_traits>,
+                                  task_scheduling_info_type>>;
+
+  OwningRawPtr<task_base_type> m_task = nullptr;
+
+  KOKKOS_INLINE_FUNCTION
+  explicit BasicFuture(task_base_type* task) : m_task(task) {
+    // Note: reference count starts at 2 to account for initial increment
+    // TODO @tasking @minor DSH verify reference count here and/or encapsulate
+    // starting reference count closer to here
+  }
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  BasicFuture() noexcept : m_task(nullptr) {}
+
+  KOKKOS_INLINE_FUNCTION
+  BasicFuture(BasicFuture&& rhs) noexcept : m_task(std::move(rhs.m_task)) {
+    rhs.m_task = nullptr;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  BasicFuture(BasicFuture const& rhs)
+      //  : m_task(rhs.m_task)
+      : m_task(nullptr) {
+    *static_cast<task_base_type* volatile*>(&m_task) = rhs.m_task;
+    if (m_task) m_task->increment_reference_count();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  BasicFuture& operator=(BasicFuture&& rhs) noexcept {
+    if (m_task != rhs.m_task) {
+      clear();
+      // m_task = std::move(rhs.m_task);
+      *static_cast<task_base_type* volatile*>(&m_task) = rhs.m_task;
+      // rhs.m_task reference count is unchanged, since this is a move
+    } else {
+      // They're the same, but this is a move, so one fewer reference now
+      rhs.clear();
+    }
+    rhs.m_task = nullptr;
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  BasicFuture& operator=(BasicFuture const& rhs) {
+    if (m_task != rhs.m_task) {
+      clear();
+      // m_task = rhs.m_task;
+      *static_cast<task_base_type* volatile*>(&m_task) = rhs.m_task;
+    }
+    if (m_task != nullptr) {
+      m_task->increment_reference_count();
+    }
+    return *this;
+  }
+
+  //----------------------------------------
+
+  template <class T, class S>
+  KOKKOS_INLINE_FUNCTION BasicFuture(
+      BasicFuture<T, S>&& rhs) noexcept  // NOLINT(google-explicit-constructor)
+      : m_task(std::move(rhs.m_task)) {
+    static_assert(std::is_void<scheduler_type>::value ||
+                      std::is_same<scheduler_type, S>::value,
+                  "Moved Futures must have the same scheduler");
+
+    static_assert(
+        std::is_void<value_type>::value || std::is_same<value_type, T>::value,
+        "Moved Futures must have the same value_type");
+
+    // reference counts are unchanged, since this is a move
+    rhs.m_task = nullptr;
+  }
+
+  template <class T, class S>
+  KOKKOS_INLINE_FUNCTION BasicFuture(
+      BasicFuture<T, S> const& rhs)  // NOLINT(google-explicit-constructor)
+                                     //: m_task(rhs.m_task)
+      : m_task(nullptr) {
+    static_assert(std::is_void<scheduler_type>::value ||
+                      std::is_same<scheduler_type, S>::value,
+                  "Copied Futures must have the same scheduler");
+
+    static_assert(
+        std::is_void<value_type>::value || std::is_same<value_type, T>::value,
+        "Copied Futures must have the same value_type");
+
+    *static_cast<task_base_type* volatile*>(&m_task) = rhs.m_task;
+    if (m_task) m_task->increment_reference_count();
+  }
+
+  template <class T, class S>
+  KOKKOS_INLINE_FUNCTION BasicFuture& operator=(BasicFuture<T, S> const& rhs) {
+    static_assert(std::is_void<scheduler_type>::value ||
+                      std::is_same<scheduler_type, S>::value,
+                  "Assigned Futures must have the same scheduler");
+
+    static_assert(
+        std::is_void<value_type>::value || std::is_same<value_type, T>::value,
+        "Assigned Futures must have the same value_type");
+
+    if (m_task != rhs.m_task) {
+      clear();
+      // m_task = rhs.m_task;
+      *static_cast<task_base_type* volatile*>(&m_task) = rhs.m_task;
+      if (m_task != nullptr) {
+        m_task->increment_reference_count();
+      }
+    }
+    return *this;
+  }
+
+  template <class T, class S>
+  KOKKOS_INLINE_FUNCTION BasicFuture& operator=(BasicFuture<T, S>&& rhs) {
+    static_assert(std::is_void<scheduler_type>::value ||
+                      std::is_same<scheduler_type, S>::value,
+                  "Assigned Futures must have the same scheduler");
+
+    static_assert(
+        std::is_void<value_type>::value || std::is_same<value_type, T>::value,
+        "Assigned Futures must have the same value_type");
+
+    if (m_task != rhs.m_task) {
+      clear();
+      // m_task = std::move(rhs.m_task);
+      *static_cast<task_base_type* volatile*>(&m_task) = rhs.m_task;
+      // rhs.m_task reference count is unchanged, since this is a move
+    } else {
+      // They're the same, but this is a move, so one fewer reference now
+      rhs.clear();
+    }
+    rhs.m_task = nullptr;
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  ~BasicFuture() noexcept { clear(); }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  void clear() noexcept {
+    if (m_task) {
+      bool should_delete = m_task->decrement_and_check_reference_count();
+      if (should_delete) {
+        static_cast<task_queue_type*>(m_task->ready_queue_base_ptr())
+            ->deallocate(std::move(*m_task));
+      }
+    }
+    // m_task = nullptr;
+    *static_cast<task_base_type* volatile*>(&m_task) = nullptr;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool is_null() const noexcept { return m_task == nullptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool is_ready() const noexcept {
+    return (m_task == nullptr) || m_task->wait_queue_is_consumed();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const typename Impl::TaskResult<ValueType>::reference_type get() const {
+    KOKKOS_EXPECTS(is_ready());
+    return static_cast<result_storage_type*>(m_task)->value_reference();
+    // return Impl::TaskResult<ValueType>::get(m_task);
+  }
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// OLD CODE
+////////////////////////////////////////////////////////////////////////////////
+
+template <typename ValueType, typename Scheduler>
+class BasicFuture {
+ private:
+  template <typename, typename>
+  friend class BasicTaskScheduler;
+  template <typename, typename>
+  friend class BasicFuture;
+  friend class Impl::TaskBase;
+  template <typename, typename, typename>
+  friend class Impl::Task;
+
+  //----------------------------------------
+
+ public:
+  //----------------------------------------
+
+  using scheduler_type  = Scheduler;
+  using queue_type      = typename scheduler_type::queue_type;
+  using execution_space = typename scheduler_type::execution_space;
+  using value_type      = ValueType;
+
+  //----------------------------------------
+
+ private:
+  //----------------------------------------
+
+  using task_base = Impl::TaskBase;
+
+  task_base* m_task;
+
+  KOKKOS_INLINE_FUNCTION explicit BasicFuture(task_base* task)
+      : m_task(nullptr) {
+    if (task) queue_type::assign(&m_task, task);
+  }
+
+  //----------------------------------------
+
+ public:
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  bool is_null() const { return nullptr == m_task; }
+
+  KOKKOS_INLINE_FUNCTION
+  int reference_count() const {
+    return nullptr != m_task ? m_task->reference_count() : 0;
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  void clear() {
+    if (m_task) queue_type::assign(&m_task, nullptr);
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  ~BasicFuture() { clear(); }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  BasicFuture() noexcept : m_task(nullptr) {}
+
+  KOKKOS_INLINE_FUNCTION
+  BasicFuture(BasicFuture&& rhs) noexcept : m_task(rhs.m_task) {
+    rhs.m_task = nullptr;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  BasicFuture(const BasicFuture& rhs) : m_task(nullptr) {
+    if (rhs.m_task) queue_type::assign(&m_task, rhs.m_task);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  BasicFuture& operator=(BasicFuture&& rhs) noexcept {
+    clear();
+    m_task     = rhs.m_task;
+    rhs.m_task = nullptr;
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  BasicFuture& operator=(BasicFuture const& rhs) {
+    if (m_task || rhs.m_task) queue_type::assign(&m_task, rhs.m_task);
+    return *this;
+  }
+
+  //----------------------------------------
+
+  template <class T, class S>
+  KOKKOS_INLINE_FUNCTION BasicFuture(
+      BasicFuture<T, S>&& rhs) noexcept  // NOLINT(google-explicit-constructor)
+      : m_task(rhs.m_task) {
+    static_assert(std::is_void<scheduler_type>::value ||
+                      std::is_same<scheduler_type, S>::value,
+                  "Moved Futures must have the same scheduler");
+
+    static_assert(
+        std::is_void<value_type>::value || std::is_same<value_type, T>::value,
+        "Assigned Futures must have the same value_type");
+
+    rhs.m_task = nullptr;
+  }
+
+  template <class T, class S>
+  KOKKOS_INLINE_FUNCTION BasicFuture(
+      BasicFuture<T, S> const& rhs)  // NOLINT(google-explicit-constructor)
+      : m_task(nullptr) {
+    static_assert(std::is_void<scheduler_type>::value ||
+                      std::is_same<scheduler_type, S>::value,
+                  "Copied Futures must have the same scheduler");
+
+    static_assert(
+        std::is_void<value_type>::value || std::is_same<value_type, T>::value,
+        "Assigned Futures must have the same value_type");
+
+    if (rhs.m_task) queue_type::assign(&m_task, rhs.m_task);
+  }
+
+  template <class T, class S>
+  KOKKOS_INLINE_FUNCTION BasicFuture& operator=(BasicFuture<T, S> const& rhs) {
+    static_assert(std::is_void<scheduler_type>::value ||
+                      std::is_same<scheduler_type, S>::value,
+                  "Assigned Futures must have the same scheduler");
+
+    static_assert(
+        std::is_void<value_type>::value || std::is_same<value_type, T>::value,
+        "Assigned Futures must have the same value_type");
+
+    if (m_task || rhs.m_task) queue_type::assign(&m_task, rhs.m_task);
+    return *this;
+  }
+
+  template <class T, class S>
+  KOKKOS_INLINE_FUNCTION BasicFuture& operator=(BasicFuture<T, S>&& rhs) {
+    static_assert(std::is_void<scheduler_type>::value ||
+                      std::is_same<scheduler_type, S>::value,
+                  "Assigned Futures must have the same scheduler");
+
+    static_assert(
+        std::is_void<value_type>::value || std::is_same<value_type, T>::value,
+        "Assigned Futures must have the same value_type");
+
+    clear();
+    m_task     = rhs.m_task;
+    rhs.m_task = nullptr;
+    return *this;
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  int is_ready() const noexcept {
+    return (nullptr == m_task) ||
+           (reinterpret_cast<task_base*>(task_base::LockTag) == m_task->m_wait);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const typename Impl::TaskResult<ValueType>::reference_type get() const {
+    if (nullptr == m_task) {
+      Kokkos::abort("Kokkos::Future::get ERROR: is_null()");
+    }
+    return Impl::TaskResult<ValueType>::get(m_task);
+  }
+};
+
+// Is a Future with the given execution space
+template <typename, typename ExecSpace = void>
+struct is_future : public std::false_type {};
+
+template <typename ValueType, typename Scheduler, typename ExecSpace>
+struct is_future<BasicFuture<ValueType, Scheduler>, ExecSpace>
+    : std::integral_constant<
+          bool,
+          std::is_same<ExecSpace, typename Scheduler::execution_space>::value ||
+              std::is_void<ExecSpace>::value> {};
+
+////////////////////////////////////////////////////////////////////////////////
+// END OLD CODE
+////////////////////////////////////////////////////////////////////////////////
+
+namespace Impl {
+
+template <class Arg1, class Arg2>
+class ResolveFutureArgOrder {
+ private:
+  enum { Arg1_is_space = Kokkos::is_space<Arg1>::value };
+  enum { Arg2_is_space = Kokkos::is_space<Arg2>::value };
+  enum { Arg1_is_value = !Arg1_is_space && !std::is_void<Arg1>::value };
+  enum { Arg2_is_value = !Arg2_is_space && !std::is_void<Arg2>::value };
+
+  static_assert(!(Arg1_is_space && Arg2_is_space),
+                "Future cannot be given two spaces");
+
+  static_assert(!(Arg1_is_value && Arg2_is_value),
+                "Future cannot be given two value types");
+
+  using value_type =
+      std::conditional_t<Arg1_is_value, Arg1,
+                         std::conditional_t<Arg2_is_value, Arg2, void>>;
+
+  using execution_space = typename std::conditional_t<
+      Arg1_is_space, Arg1,
+      std::conditional_t<Arg2_is_space, Arg2,
+                         Kokkos::DefaultExecutionSpace>>::execution_space;
+
+ public:
+  using type = BasicFuture<value_type, TaskScheduler<execution_space>>;
+};
+
+}  // end namespace Impl
+
+/**
+ *
+ *  Future< space >  // value_type == void
+ *  Future< value >  // space == Default
+ *  Future< value , space >
+ *
+ */
+template <class Arg1 = void, class Arg2 = void>
+using Future = typename Impl::ResolveFutureArgOrder<Arg1, Arg2>::type;
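+
+// Illustrative resolutions of the alias above; the argument order does not
+// matter, and DefaultExecutionSpace is used when no space is given:
+//
+//   Kokkos::Future<double>
+//     // -> BasicFuture<double, TaskScheduler<DefaultExecutionSpace>>
+//   Kokkos::Future<Kokkos::DefaultExecutionSpace>
+//     // -> BasicFuture<void, TaskScheduler<DefaultExecutionSpace>>
+//   Kokkos::Future<double, Kokkos::DefaultExecutionSpace>
+//     // -> BasicFuture<double, TaskScheduler<DefaultExecutionSpace>>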
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_FUTURE_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Graph.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Graph.hpp
new file mode 100644 (file)
index 0000000..1f71665
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_GRAPH_HPP
+#define KOKKOS_GRAPH_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_Error.hpp>  // KOKKOS_EXPECTS
+
+#include <Kokkos_Graph_fwd.hpp>
+#include <impl/Kokkos_GraphImpl_fwd.hpp>
+
+// GraphAccess needs to be defined, not just declared
+#include <impl/Kokkos_GraphImpl.hpp>
+
+#include <impl/Kokkos_Utilities.hpp>  // fold emulation
+
+#include <functional>
+#include <memory>
+
+namespace Kokkos {
+namespace Experimental {
+
+//==============================================================================
+// <editor-fold desc="Graph"> {{{1
+
+template <class ExecutionSpace>
+struct KOKKOS_ATTRIBUTE_NODISCARD Graph {
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="public member types"> {{{2
+
+  using execution_space = ExecutionSpace;
+  using graph           = Graph;
+
+  // </editor-fold> end public member types }}}2
+  //----------------------------------------------------------------------------
+
+ private:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="friends"> {{{2
+
+  friend struct Kokkos::Impl::GraphAccess;
+
+  // </editor-fold> end friends }}}2
+  //----------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="private data members"> {{{2
+
+  using impl_t                       = Kokkos::Impl::GraphImpl<ExecutionSpace>;
+  std::shared_ptr<impl_t> m_impl_ptr = nullptr;
+
+  // </editor-fold> end private data members }}}2
+  //----------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="private ctors"> {{{2
+
+  // Note: only create_graph() uses this constructor, but we can't just make
+  // that a friend instead of GraphAccess because of the way that friend
+  // function template injection works.
+  explicit Graph(std::shared_ptr<impl_t> arg_impl_ptr)
+      : m_impl_ptr(std::move(arg_impl_ptr)) {}
+
+  // </editor-fold> end private ctors }}}2
+  //----------------------------------------------------------------------------
+
+ public:
+  ExecutionSpace const& get_execution_space() const {
+    return m_impl_ptr->get_execution_space();
+  }
+
+  void submit() const {
+    KOKKOS_EXPECTS(bool(m_impl_ptr))
+    (*m_impl_ptr).submit();
+  }
+};
+
+// </editor-fold> end Graph }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="when_all"> {{{1
+
+template <class... PredecessorRefs>
+// constraints (not intended for subsumption, though...)
+//   ((remove_cvref_t<PredecessorRefs> is a specialization of
+//        GraphNodeRef with get_root().get_graph_impl() as its GraphImpl)
+//      && ...)
+auto when_all(PredecessorRefs&&... arg_pred_refs) {
+  // TODO @graph @desul-integration check the constraints and preconditions
+  //                                once we have folded conjunctions from
+  //                                desul
+  static_assert(sizeof...(PredecessorRefs) > 0,
+                "when_all() needs at least one predecessor.");
+  auto graph_ptr_impl =
+      Kokkos::Impl::GraphAccess::get_graph_weak_ptr(
+          std::get<0>(std::forward_as_tuple(arg_pred_refs...)))
+          .lock();
+  auto node_ptr_impl = graph_ptr_impl->create_aggregate_ptr(arg_pred_refs...);
+  graph_ptr_impl->add_node(node_ptr_impl);
+  KOKKOS_IMPL_FOLD_COMMA_OPERATOR(
+      graph_ptr_impl->add_predecessor(node_ptr_impl, arg_pred_refs) /* ... */);
+  return Kokkos::Impl::GraphAccess::make_graph_node_ref(
+      std::move(graph_ptr_impl), std::move(node_ptr_impl));
+}
+
+// </editor-fold> end when_all }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="create_graph"> {{{1
+
+template <class ExecutionSpace, class Closure>
+Graph<ExecutionSpace> create_graph(ExecutionSpace ex, Closure&& arg_closure) {
+  // Create a shared pointer to the graph:
+  // We need an attorney class here so we have an implementation friend to
+  // create a Graph class without Graph having public constructors. We can't
+  // just make `create_graph` itself a friend because of the way that friend
+  // function template injection works.
+  auto rv = Kokkos::Impl::GraphAccess::construct_graph(std::move(ex));
+  // Invoke the user's graph construction closure
+  ((Closure &&) arg_closure)(Kokkos::Impl::GraphAccess::create_root_ref(rv));
+  // and give them back the graph
+  // KOKKOS_ENSURES(rv.m_impl_ptr.use_count() == 1)
+  return rv;
+}
+
+template <
+    class ExecutionSpace = DefaultExecutionSpace,
+    class Closure = Kokkos::Impl::DoNotExplicitlySpecifyThisTemplateParameter>
+Graph<ExecutionSpace> create_graph(Closure&& arg_closure) {
+  return create_graph(ExecutionSpace{}, (Closure &&) arg_closure);
+}
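+
+// A minimal end-to-end sketch (illustrative only; `n` and the three functors
+// are hypothetical):
+//
+//   auto graph = Kokkos::Experimental::create_graph([&](auto root) {
+//     auto a = root.then_parallel_for(n, functor_a);
+//     auto b = root.then_parallel_for(n, functor_b);
+//     Kokkos::Experimental::when_all(a, b).then_parallel_for(n, functor_c);
+//   });
+//   graph.submit();  // runs a and b, then c once both predecessors finish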
+
+// </editor-fold> end create_graph }}}1
+//==============================================================================
+
+}  // end namespace Experimental
+}  // namespace Kokkos
+
+// Even though these things are separable, include them here for now so that
+// the user only needs to include Kokkos_Graph.hpp to get the whole facility.
+#include <Kokkos_GraphNode.hpp>
+
+#include <impl/Kokkos_GraphNodeImpl.hpp>
+#include <impl/Kokkos_Default_Graph_Impl.hpp>
+#include <Cuda/Kokkos_Cuda_Graph_Impl.hpp>
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH
+#endif
+#endif  // KOKKOS_GRAPH_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_GraphNode.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_GraphNode.hpp
new file mode 100644 (file)
index 0000000..6eab5ec
--- /dev/null
@@ -0,0 +1,467 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_KOKKOS_GRAPHNODE_HPP
+#define KOKKOS_KOKKOS_GRAPHNODE_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <impl/Kokkos_Error.hpp>  // contract macros
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Graph_fwd.hpp>
+#include <impl/Kokkos_GraphImpl_fwd.hpp>
+#include <Kokkos_Parallel_Reduce.hpp>
+#include <impl/Kokkos_GraphImpl_Utilities.hpp>
+#include <impl/Kokkos_GraphImpl.hpp>  // GraphAccess
+
+#include <memory>  // std::shared_ptr
+
+namespace Kokkos {
+namespace Experimental {
+
+template <class ExecutionSpace, class Kernel /*= TypeErasedTag*/,
+          class Predecessor /*= TypeErasedTag*/>
+class GraphNodeRef {
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="template parameter constraints"> {{{2
+
+  // Note: because of these assertions, instantiating this class template is not
+  //       intended to be SFINAE-safe, so do validation before you instantiate.
+
+// WORKAROUND Could not get it to compile with IBM XL V16.1.1
+#ifndef KOKKOS_COMPILER_IBM
+  static_assert(
+      std::is_same<Predecessor, TypeErasedTag>::value ||
+          Kokkos::Impl::is_specialization_of<Predecessor, GraphNodeRef>::value,
+      "Invalid predecessor template parameter given to GraphNodeRef");
+#endif
+
+  static_assert(
+      Kokkos::is_execution_space<ExecutionSpace>::value,
+      "Invalid execution space template parameter given to GraphNodeRef");
+
+  static_assert(std::is_same<Predecessor, TypeErasedTag>::value ||
+                    Kokkos::Impl::is_graph_kernel<Kernel>::value,
+                "Invalid kernel template parameter given to GraphNodeRef");
+
+  static_assert(!Kokkos::Impl::is_more_type_erased<Kernel, Predecessor>::value,
+                "The kernel of a graph node can't be more type-erased than the "
+                "predecessor");
+
+  // </editor-fold> end template parameter constraints }}}2
+  //----------------------------------------------------------------------------
+
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="public member types"> {{{2
+
+  using execution_space   = ExecutionSpace;
+  using graph_kernel      = Kernel;
+  using graph_predecessor = Predecessor;
+
+  // </editor-fold> end public member types }}}2
+  //----------------------------------------------------------------------------
+
+ private:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Friends"> {{{2
+
+  template <class, class, class>
+  friend class GraphNodeRef;
+  friend struct Kokkos::Impl::GraphAccess;
+
+  // </editor-fold> end Friends }}}2
+  //----------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Private Data Members"> {{{2
+
+  using graph_impl_t = Kokkos::Impl::GraphImpl<ExecutionSpace>;
+  std::weak_ptr<graph_impl_t> m_graph_impl;
+
+  // TODO @graphs figure out if we can get away with a weak reference here?
+  //              GraphNodeRef instances shouldn't be stored by users outside
+  //              of the create_graph closure, and so if we restructure things
+  //              slightly, we could make it so that the graph owns the
+  //              node_impl_t instance and this only holds a std::weak_ptr to
+  //              it.
+  using node_impl_t =
+      Kokkos::Impl::GraphNodeImpl<ExecutionSpace, Kernel, Predecessor>;
+  std::shared_ptr<node_impl_t> m_node_impl;
+
+  // </editor-fold> end Private Data Members }}}2
+  //----------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Implementation detail accessors"> {{{2
+
+  // Internally, use shallow constness
+  node_impl_t& get_node_impl() const { return *m_node_impl.get(); }
+  std::shared_ptr<node_impl_t> const& get_node_ptr() const& {
+    return m_node_impl;
+  }
+  std::shared_ptr<node_impl_t> get_node_ptr() && {
+    return std::move(m_node_impl);
+  }
+  std::weak_ptr<graph_impl_t> get_graph_weak_ptr() const {
+    return m_graph_impl;
+  }
+
+  // </editor-fold> end Implementation detail accessors }}}2
+  //----------------------------------------------------------------------------
+
+  // TODO kernel name propagation and exposure
+
+  template <class NextKernelDeduced>
+  auto _then_kernel(NextKernelDeduced&& arg_kernel) const {
+    // readability note:
+    //   std::remove_cvref_t<NextKernelDeduced> is a specialization of
+    //   Kokkos::Impl::GraphNodeKernelImpl:
+    static_assert(Kokkos::Impl::is_specialization_of<
+                      Kokkos::Impl::remove_cvref_t<NextKernelDeduced>,
+                      Kokkos::Impl::GraphNodeKernelImpl>::value,
+                  "Kokkos internal error");
+
+    auto graph_ptr = m_graph_impl.lock();
+    KOKKOS_EXPECTS(bool(graph_ptr))
+
+    using next_kernel_t = Kokkos::Impl::remove_cvref_t<NextKernelDeduced>;
+
+    using return_t = GraphNodeRef<ExecutionSpace, next_kernel_t, GraphNodeRef>;
+
+    auto rv = Kokkos::Impl::GraphAccess::make_graph_node_ref(
+        m_graph_impl,
+        Kokkos::Impl::GraphAccess::make_node_shared_ptr<
+            typename return_t::node_impl_t>(
+            m_node_impl->execution_space_instance(),
+            Kokkos::Impl::_graph_node_kernel_ctor_tag{},
+            (NextKernelDeduced &&) arg_kernel,
+            // *this is the predecessor
+            Kokkos::Impl::_graph_node_predecessor_ctor_tag{}, *this));
+
+    // Add the node itself to the backend's graph data structure, now that
+    // everything is set up.
+    graph_ptr->add_node(rv.m_node_impl);
+    // Add the predecessor we stored in the constructor above to the backend's
+    // data structure, now that everything is set up.
+    graph_ptr->add_predecessor(rv.m_node_impl, *this);
+    KOKKOS_ENSURES(bool(rv.m_node_impl))
+    return rv;
+  }
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Private constructors"> {{{2
+
+  GraphNodeRef(std::weak_ptr<graph_impl_t> arg_graph_impl,
+               std::shared_ptr<node_impl_t> arg_node_impl)
+      : m_graph_impl(std::move(arg_graph_impl)),
+        m_node_impl(std::move(arg_node_impl)) {}
+
+  // </editor-fold> end Private constructors }}}2
+  //----------------------------------------------------------------------------
+
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Constructors, destructors, and assignment"> {{{2
+
+  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+  // <editor-fold desc="rule of 6 ctors"> {{{3
+
+  // Copyable and movable (basically just shared_ptr semantics)
+  GraphNodeRef() noexcept               = default;
+  GraphNodeRef(GraphNodeRef const&)     = default;
+  GraphNodeRef(GraphNodeRef&&) noexcept = default;
+  GraphNodeRef& operator=(GraphNodeRef const&) = default;
+  GraphNodeRef& operator=(GraphNodeRef&&) noexcept = default;
+  ~GraphNodeRef()                                  = default;
+
+  // </editor-fold> end rule of 6 ctors }}}3
+  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+  // <editor-fold desc="Type-erasing converting ctor and assignment"> {{{3
+
+  template <
+      class OtherKernel, class OtherPredecessor,
+      std::enable_if_t<
+          // Not a copy/move constructor
+          !std::is_same<GraphNodeRef, GraphNodeRef<execution_space, OtherKernel,
+                                                   OtherPredecessor>>::value &&
+              // must be an allowed type erasure of the kernel
+              Kokkos::Impl::is_compatible_type_erasure<OtherKernel,
+                                                       graph_kernel>::value &&
+              // must be an allowed type erasure of the predecessor
+              Kokkos::Impl::is_compatible_type_erasure<
+                  OtherPredecessor, graph_predecessor>::value,
+          int> = 0>
+  /* implicit */
+  GraphNodeRef(
+      GraphNodeRef<execution_space, OtherKernel, OtherPredecessor> const& other)
+      : m_graph_impl(other.m_graph_impl), m_node_impl(other.m_node_impl) {}
+
+  // Note: because this is an implicit conversion (as is supposed to be the
+  //       case with most type-erasing wrappers like this), we don't also need
+  //       a converting assignment operator.
+
+  // </editor-fold> end Type-erasing converting ctor and assignment }}}3
+  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+  // </editor-fold> end Constructors, destructors, and assignment }}}2
+  //----------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="then_parallel_for"> {{{2
+
+  template <
+      class Policy, class Functor,
+      std::enable_if_t<
+          // equivalent to:
+          //   requires Kokkos::ExecutionPolicy<remove_cvref_t<Policy>>
+          is_execution_policy<Kokkos::Impl::remove_cvref_t<Policy>>::value,
+          // --------------------
+          int> = 0>
+  auto then_parallel_for(std::string arg_name, Policy&& arg_policy,
+                         Functor&& functor) const {
+    //----------------------------------------
+    KOKKOS_EXPECTS(!m_graph_impl.expired())
+    KOKKOS_EXPECTS(bool(m_node_impl))
+    // TODO @graph restore this expectation once we add comparability to space
+    //      instances
+    // KOKKOS_EXPECTS(
+    //   arg_policy.space() == m_graph_impl->get_execution_space());
+
+    // needs to static assert constraint: DataParallelFunctor<Functor>
+
+    using policy_t = Kokkos::Impl::remove_cvref_t<Policy>;
+    // constraint check: same execution space type (or defaulted, maybe?)
+    static_assert(
+        std::is_same<typename policy_t::execution_space,
+                     execution_space>::value,
+        // TODO @graph make defaulted execution space work
+        //|| policy_t::execution_space_is_defaulted,
+        "Execution Space mismatch between execution policy and graph");
+
+    auto policy = Experimental::require((Policy &&) arg_policy,
+                                        Kokkos::Impl::KernelInGraphProperty{});
+
+    using next_policy_t = decltype(policy);
+    using next_kernel_t =
+        Kokkos::Impl::GraphNodeKernelImpl<ExecutionSpace, next_policy_t,
+                                          std::decay_t<Functor>,
+                                          Kokkos::ParallelForTag>;
+    return this->_then_kernel(next_kernel_t{std::move(arg_name), policy.space(),
+                                            (Functor &&) functor,
+                                            (Policy &&) policy});
+  }
+
+  template <
+      class Policy, class Functor,
+      std::enable_if_t<
+          // equivalent to:
+          //   requires Kokkos::ExecutionPolicy<remove_cvref_t<Policy>>
+          is_execution_policy<Kokkos::Impl::remove_cvref_t<Policy>>::value,
+          // --------------------
+          int> = 0>
+  auto then_parallel_for(Policy&& policy, Functor&& functor) const {
+    // needs to static assert constraint: DataParallelFunctor<Functor>
+    return this->then_parallel_for("", (Policy &&) policy,
+                                   (Functor &&) functor);
+  }
+
+  template <class Functor>
+  auto then_parallel_for(std::string name, std::size_t n,
+                         Functor&& functor) const {
+    // needs to static assert constraint: DataParallelFunctor<Functor>
+    return this->then_parallel_for(std::move(name),
+                                   Kokkos::RangePolicy<execution_space>(0, n),
+                                   (Functor &&) functor);
+  }
+
+  template <class Functor>
+  auto then_parallel_for(std::size_t n, Functor&& functor) const {
+    // needs to static assert constraint: DataParallelFunctor<Functor>
+    return this->then_parallel_for("", n, (Functor &&) functor);
+  }
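+
+  // Usage sketch (illustrative; assumes a graph created via
+  // Kokkos::Experimental::create_graph, with hypothetical names `data`
+  // and `n`):
+  //
+  //   auto graph = Kokkos::Experimental::create_graph([&](auto root) {
+  //     auto fill = root.then_parallel_for(
+  //         "fill", n, KOKKOS_LAMBDA(int i) { data(i) = i; });
+  //   });
+  //   graph.submit();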
+
+  // </editor-fold> end then_parallel_for }}}2
+  //----------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="then_parallel_reduce"> {{{2
+
+  template <
+      class Policy, class Functor, class ReturnType,
+      std::enable_if_t<
+          // equivalent to:
+          //   requires Kokkos::ExecutionPolicy<remove_cvref_t<Policy>>
+          is_execution_policy<Kokkos::Impl::remove_cvref_t<Policy>>::value,
+          // --------------------
+          int> = 0>
+  auto then_parallel_reduce(std::string arg_name, Policy&& arg_policy,
+                            Functor&& functor,
+                            ReturnType&& return_value) const {
+    auto graph_impl_ptr = m_graph_impl.lock();
+    KOKKOS_EXPECTS(bool(graph_impl_ptr))
+    KOKKOS_EXPECTS(bool(m_node_impl))
+    // TODO @graph restore this expectation once we add comparability to space
+    //      instances
+    // KOKKOS_EXPECTS(
+    //   arg_policy.space() == m_graph_impl->get_execution_space());
+
+    // needs static assertion of constraint:
+    //   DataParallelReductionFunctor<Functor, ReturnType>
+
+    using policy_t = std::remove_cv_t<std::remove_reference_t<Policy>>;
+    static_assert(
+        std::is_same<typename policy_t::execution_space,
+                     execution_space>::value,
+        // TODO @graph make defaulted execution space work
+        // || policy_t::execution_space_is_defaulted,
+        "Execution Space mismatch between execution policy and graph");
+
+    // This is also just an expectation, but it's one that we expect the user
+    // to interact with (even in release mode), so we should throw an exception
+    // with an explanation rather than just doing a contract assertion.
+    // We can't static_assert this because of the way that Reducers store
+    // whether or not they point to a View as a runtime boolean rather than part
+    // of the type.
+    if (Kokkos::Impl::parallel_reduce_needs_fence(
+            graph_impl_ptr->get_execution_space(), return_value)) {
+      Kokkos::Impl::throw_runtime_exception(
+          "Parallel reductions in graphs can't operate on Reducers that "
+          "reference a scalar because they can't complete synchronously. Use a "
+          "Kokkos::View instead and keep in mind the result will only be "
+          "available once the graph is submitted (or in tasks that depend on "
+          "this one).");
+    }
+
+    //----------------------------------------
+    // This is a disaster, but I guess it's not my disaster to fix right now
+    using return_type_remove_cvref =
+        std::remove_cv_t<std::remove_reference_t<ReturnType>>;
+    static_assert(Kokkos::is_view<return_type_remove_cvref>::value ||
+                      Kokkos::is_reducer<return_type_remove_cvref>::value,
+                  "Output argument to parallel reduce in a graph must be a "
+                  "View or a Reducer");
+    using return_type =
+        // Yes, you do really have to do this...
+        std::conditional_t<Kokkos::is_reducer<return_type_remove_cvref>::value,
+                           return_type_remove_cvref,
+                           const return_type_remove_cvref>;
+    using functor_type = Kokkos::Impl::remove_cvref_t<Functor>;
+    // see Kokkos_Parallel_Reduce.hpp for how these details are used there;
+    // we're just doing the same thing here
+    using return_value_adapter =
+        Kokkos::Impl::ParallelReduceReturnValue<void, return_type,
+                                                functor_type>;
+    // End of Kokkos reducer disaster
+    //----------------------------------------
+
+    auto policy = Experimental::require((Policy &&) arg_policy,
+                                        Kokkos::Impl::KernelInGraphProperty{});
+
+    using next_policy_t = decltype(policy);
+    using next_kernel_t = Kokkos::Impl::GraphNodeKernelImpl<
+        ExecutionSpace, next_policy_t, functor_type, Kokkos::ParallelReduceTag,
+        typename return_value_adapter::reducer_type>;
+
+    return this->_then_kernel(next_kernel_t{
+        std::move(arg_name), graph_impl_ptr->get_execution_space(),
+        (Functor &&) functor, (Policy &&) policy,
+        return_value_adapter::return_value(return_value, functor)});
+  }
+
+  template <
+      class Policy, class Functor, class ReturnType,
+      std::enable_if_t<
+          // equivalent to:
+          //   requires Kokkos::ExecutionPolicy<remove_cvref_t<Policy>>
+          is_execution_policy<Kokkos::Impl::remove_cvref_t<Policy>>::value,
+          // --------------------
+          int> = 0>
+  auto then_parallel_reduce(Policy&& arg_policy, Functor&& functor,
+                            ReturnType&& return_value) const {
+    return this->then_parallel_reduce("", (Policy &&) arg_policy,
+                                      (Functor &&) functor,
+                                      (ReturnType &&) return_value);
+  }
+
+  template <class Functor, class ReturnType>
+  auto then_parallel_reduce(std::string label,
+                            typename execution_space::size_type idx_end,
+                            Functor&& functor,
+                            ReturnType&& return_value) const {
+    return this->then_parallel_reduce(
+        std::move(label), Kokkos::RangePolicy<execution_space>{0, idx_end},
+        (Functor &&) functor, (ReturnType &&) return_value);
+  }
+
+  template <class Functor, class ReturnType>
+  auto then_parallel_reduce(typename execution_space::size_type idx_end,
+                            Functor&& functor,
+                            ReturnType&& return_value) const {
+    return this->then_parallel_reduce("", idx_end, (Functor &&) functor,
+                                      (ReturnType &&) return_value);
+  }
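+
+  // Usage sketch (illustrative; per the exception above, the result must be
+  // a Kokkos::View rather than a reducer referencing a scalar; `node`, `n`,
+  // and `sum` are hypothetical):
+  //
+  //   Kokkos::View<double, execution_space> sum("sum");
+  //   auto next = node.then_parallel_reduce(
+  //       n, KOKKOS_LAMBDA(int i, double& lsum) { lsum += i; }, sum);
+  //   // `sum` holds the result only after the graph has been submitted.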
+
+  // </editor-fold> end then_parallel_reduce }}}2
+  //----------------------------------------------------------------------------
+
+  // TODO @graph parallel scan, deep copy, etc.
+};
+
+}  // end namespace Experimental
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_GRAPHNODE_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Graph_fwd.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Graph_fwd.hpp
new file mode 100644 (file)
index 0000000..6f63965
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_GRAPH_FWD_HPP
+#define KOKKOS_KOKKOS_GRAPH_FWD_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH_FWD
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+struct TypeErasedTag {};
+
+template <class ExecutionSpace>
+struct Graph;
+
+template <class ExecutionSpace, class Kernel = TypeErasedTag,
+          class Predecessor = TypeErasedTag>
+class GraphNodeRef;
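+
+// A sketch of what the defaulted arguments mean: a
+// GraphNodeRef<ExecutionSpace> with both Kernel and Predecessor left as
+// TypeErasedTag is the fully type-erased node handle that user code
+// typically holds; more concrete instantiations convert to it implicitly.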
+
+}  // end namespace Experimental
+}  // end namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH_FWD
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_GRAPH_FWD
+#endif
+#endif  // KOKKOS_KOKKOS_GRAPH_FWD_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_HBWSpace.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_HBWSpace.hpp
new file mode 100644 (file)
index 0000000..0c5dbbd
--- /dev/null
@@ -0,0 +1,380 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_HBWSPACE_HPP
+#define KOKKOS_HBWSPACE_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_HBWSPACE
+
+#include <Kokkos_HostSpace.hpp>
+
+namespace Kokkos {
+
+namespace Experimental {
+
+namespace Impl {
+
+/// \brief Initialize lock array for arbitrary size atomics.
+///
+/// Arbitrary atomics are implemented using a hash table of locks
+/// where the hash value is derived from the address of the
+/// object for which an atomic operation is performed.
+/// This function initializes the locks to zero (unset).
+void init_lock_array_hbw_space();
+
+/// \brief Acquire a lock for the address
+///
+/// This function tries to acquire the lock for the hash value derived
+/// from the provided ptr. If the lock is successfully acquired the
+/// function returns true. Otherwise it returns false.
+bool lock_address_hbw_space(void* ptr);
+
+/// \brief Release lock for the address
+///
+/// This function releases the lock for the hash value derived
+/// from the provided ptr. This function should only be called
+/// after previously successfully acquiring a lock with
+/// lock_address.
+void unlock_address_hbw_space(void* ptr);
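+
+// Illustrative lock/unlock pattern (a sketch of how these functions combine
+// to implement an arbitrary-size atomic update on *ptr):
+//
+//   while (!lock_address_hbw_space(ptr)) /* spin until acquired */;
+//   /* ... read-modify-write the object at ptr ... */
+//   unlock_address_hbw_space(ptr);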
+
+}  // namespace Impl
+
+}  // namespace Experimental
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+namespace Experimental {
+
+/// \class HBWSpace
+/// \brief Memory management for high-bandwidth host memory.
+///
+/// HBWSpace is a memory space that governs high-bandwidth memory on the
+/// host, i.e., CPU-accessible memory such as MCDRAM allocated through
+/// the memkind library.
+class HBWSpace {
+ public:
+  //! Tag this class as a kokkos memory space
+  using memory_space = HBWSpace;
+  using size_type    = size_t;
+
+  /// \typedef execution_space
+  /// \brief Default execution space for this memory space.
+  ///
+  /// Every memory space has a default execution space.  This is
+  /// useful for things like initializing a View (which happens in
+  /// parallel using the View's default execution space).
+  using execution_space = Kokkos::DefaultHostExecutionSpace;
+
+  //! This memory space preferred device_type
+  using device_type = Kokkos::Device<execution_space, memory_space>;
+
+  /**\brief  Default memory space instance */
+  HBWSpace();
+  HBWSpace(const HBWSpace& rhs) = default;
+  HBWSpace& operator=(const HBWSpace&) = default;
+  ~HBWSpace()                          = default;
+
+  /**\brief  Non-default memory space instance to choose the allocation
+   * mechanism, if available */
+
+  enum AllocationMechanism {
+    STD_MALLOC,
+    POSIX_MEMALIGN,
+    POSIX_MMAP,
+    INTEL_MM_ALLOC
+  };
+
+  explicit HBWSpace(const AllocationMechanism&);
+
+  /**\brief  Allocate untracked memory in the space */
+  void* allocate(const size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  /**\brief  Deallocate untracked memory in the space */
+  void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
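+
+  // Usage sketch for the untracked interface (illustrative; `bytes` is
+  // hypothetical, and the caller must pair allocate/deallocate with
+  // matching sizes):
+  //
+  //   Kokkos::Experimental::HBWSpace space;
+  //   void* p = space.allocate("my_label", bytes);
+  //   /* ... use p ... */
+  //   space.deallocate("my_label", p, bytes);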
+
+ private:
+  template <class, class, class, class>
+  friend class LogicalMemorySpace;
+
+  void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      const Kokkos::Tools::SpaceHandle =
+                          Kokkos::Tools::make_space_handle(name())) const;
+  void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                       const size_t arg_alloc_size,
+                       const size_t arg_logical_size = 0,
+                       const Kokkos::Tools::SpaceHandle =
+                           Kokkos::Tools::make_space_handle(name())) const;
+
+ public:
+  /**\brief Return Name of the MemorySpace */
+  static constexpr const char* name() { return "HBW"; }
+
+ private:
+  AllocationMechanism m_alloc_mech;
+  friend class Kokkos::Impl::SharedAllocationRecord<
+      Kokkos::Experimental::HBWSpace, void>;
+};
+
+}  // namespace Experimental
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <>
+class SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>
+    : public SharedAllocationRecord<void, void> {
+ private:
+  friend Kokkos::Experimental::HBWSpace;
+
+  using RecordBase = SharedAllocationRecord<void, void>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+  static void deallocate(RecordBase*);
+
+#ifdef KOKKOS_ENABLE_DEBUG
+  /**\brief  Root record for tracked allocations from this HBWSpace instance */
+  static RecordBase s_root_record;
+#endif
+
+  const Kokkos::Experimental::HBWSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord()
+#if defined( \
+    KOKKOS_IMPL_INTEL_WORKAROUND_NOEXCEPT_SPECIFICATION_VIRTUAL_FUNCTION)
+      noexcept
+#endif
+      ;
+  SharedAllocationRecord() = default;
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::HBWSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &deallocate);
+
+ public:
+  inline std::string get_label() const {
+    return std::string(RecordBase::head()->m_label);
+  }
+
+  KOKKOS_INLINE_FUNCTION static SharedAllocationRecord* allocate(
+      const Kokkos::Experimental::HBWSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size) {
+    KOKKOS_IF_ON_HOST((return new SharedAllocationRecord(arg_space, arg_label,
+                                                         arg_alloc_size);))
+    KOKKOS_IF_ON_DEVICE(((void)arg_space; (void)arg_label; (void)arg_alloc_size;
+                         return nullptr;))
+  }
+
+  /**\brief  Allocate tracked memory in the space */
+  static void* allocate_tracked(const Kokkos::Experimental::HBWSpace& arg_space,
+                                const std::string& arg_label,
+                                const size_t arg_alloc_size);
+
+  /**\brief  Reallocate tracked memory in the space */
+  static void* reallocate_tracked(void* const arg_alloc_ptr,
+                                  const size_t arg_alloc_size);
+
+  /**\brief  Deallocate tracked memory in the space */
+  static void deallocate_tracked(void* const arg_alloc_ptr);
+
+  static SharedAllocationRecord* get_record(void* arg_alloc_ptr);
+
+  static void print_records(std::ostream&,
+                            const Kokkos::Experimental::HBWSpace&,
+                            bool detail = false);
+};
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+static_assert(
+    Kokkos::Impl::MemorySpaceAccess<Kokkos::Experimental::HBWSpace,
+                                    Kokkos::Experimental::HBWSpace>::assignable,
+    "");
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace, Kokkos::Experimental::HBWSpace> {
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HBWSpace, Kokkos::HostSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
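+
+// Illustrative consequence of the two traits above (a sketch): a HostSpace
+// view can be assigned from an HBWSpace allocation, but not vice versa.
+//
+//   static_assert(MemorySpaceAccess<Kokkos::HostSpace,
+//                     Kokkos::Experimental::HBWSpace>::assignable &&
+//                 !MemorySpaceAccess<Kokkos::Experimental::HBWSpace,
+//                     Kokkos::HostSpace>::assignable, "");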
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <>
+struct DeepCopy<Kokkos::Experimental::HBWSpace, Kokkos::Experimental::HBWSpace,
+                DefaultHostExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    hostspace_parallel_deepcopy(dst, src, n);
+  }
+
+  DeepCopy(const DefaultHostExecutionSpace& exec, void* dst, const void* src,
+           size_t n) {
+    hostspace_parallel_deepcopy(exec, dst, src, n);
+  }
+};
+
+template <class ExecutionSpace>
+struct DeepCopy<Kokkos::Experimental::HBWSpace, Kokkos::Experimental::HBWSpace,
+                ExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    hostspace_parallel_deepcopy(dst, src, n);
+  }
+
+  DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+    exec.fence(
+        "Kokkos::Impl::DeepCopy<Kokkos::Experimental::HBWSpace, "
+        "Kokkos::Experimental::HBWSpace,ExecutionSpace::DeepCopy: fence "
+        "before copy");
+    hostspace_parallel_deepcopy_async(dst, src, n);
+  }
+};
+
+template <>
+struct DeepCopy<HostSpace, Kokkos::Experimental::HBWSpace,
+                DefaultHostExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    hostspace_parallel_deepcopy(dst, src, n);
+  }
+
+  DeepCopy(const DefaultHostExecutionSpace& exec, void* dst, const void* src,
+           size_t n) {
+    hostspace_parallel_deepcopy(exec, dst, src, n);
+  }
+};
+
+template <class ExecutionSpace>
+struct DeepCopy<HostSpace, Kokkos::Experimental::HBWSpace, ExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    hostspace_parallel_deepcopy(dst, src, n);
+  }
+
+  DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+    exec.fence(
+        "Kokkos::Impl::DeepCopy<HostSpace, Kokkos::Experimental::HBWSpace, "
+        "ExecutionSpace>::DeepCopy: fence before copy");
+    hostspace_parallel_deepcopy_async(dst, src, n);
+  }
+};
+
+template <>
+struct DeepCopy<Kokkos::Experimental::HBWSpace, HostSpace,
+                DefaultHostExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    hostspace_parallel_deepcopy(dst, src, n);
+  }
+
+  DeepCopy(const DefaultHostExecutionSpace& exec, void* dst, const void* src,
+           size_t n) {
+    hostspace_parallel_deepcopy(exec, dst, src, n);
+  }
+};
+
+template <class ExecutionSpace>
+struct DeepCopy<Kokkos::Experimental::HBWSpace, HostSpace, ExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    hostspace_parallel_deepcopy(dst, src, n);
+  }
+
+  DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+    exec.fence(
+        "Kokkos::Impl::DeepCopy<Kokkos::Experimental::HBWSpace, HostSpace, "
+        "ExecutionSpace>::DeepCopy: fence before copy");
+    hostspace_parallel_deepcopy_async(dst, src, n);
+  }
+};
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+#endif
+#endif  // KOKKOS_HBWSPACE_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_HIP.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_HIP.hpp
new file mode 100644 (file)
index 0000000..c387b59
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_HIP_HPP
+#define KOKKOS_HIP_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+
+#if defined(KOKKOS_ENABLE_HIP)
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#include <Kokkos_HIP_Space.hpp>
+#include <Kokkos_Parallel.hpp>
+
+#include <HIP/Kokkos_HIP_Half_Impl_Type.hpp>
+#include <HIP/Kokkos_HIP_Half_Conversion.hpp>
+#include <HIP/Kokkos_HIP_Instance.hpp>
+#include <HIP/Kokkos_HIP_MDRangePolicy.hpp>
+#include <HIP/Kokkos_HIP_Parallel_Range.hpp>
+#include <HIP/Kokkos_HIP_Parallel_MDRange.hpp>
+#include <HIP/Kokkos_HIP_Parallel_Team.hpp>
+#include <HIP/Kokkos_HIP_UniqueToken.hpp>
+
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_HIP_Space.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_HIP_Space.hpp
new file mode 100644 (file)
index 0000000..8c195a0
--- /dev/null
@@ -0,0 +1,806 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_HIPSPACE_HPP
+#define KOKKOS_HIPSPACE_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+
+#if defined(KOKKOS_ENABLE_HIP)
+
+#include <iosfwd>
+#include <typeinfo>
+#include <string>
+#include <cstddef>
+#include <iosfwd>
+
+#include <Kokkos_HostSpace.hpp>
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <HIP/Kokkos_HIP_Error.hpp>  // HIP_SAFE_CALL
+
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <impl/Kokkos_HostSharedPtr.hpp>
+#include <impl/Kokkos_InitializationSettings.hpp>
+
+#include <hip/hip_runtime_api.h>
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename T>
+struct is_hip_type_space : public std::false_type {};
+
+}  // namespace Impl
+
+namespace Experimental {
+/** \brief  HIP on-device memory management */
+
+class HIPSpace {
+ public:
+  //! Tag this class as a kokkos memory space
+  using memory_space    = HIPSpace;
+  using execution_space = Kokkos::Experimental::HIP;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+
+  using size_type = unsigned int;
+
+  /*--------------------------------*/
+
+  HIPSpace();
+  HIPSpace(HIPSpace&& rhs)      = default;
+  HIPSpace(const HIPSpace& rhs) = default;
+  HIPSpace& operator=(HIPSpace&& rhs) = default;
+  HIPSpace& operator=(const HIPSpace& rhs) = default;
+  ~HIPSpace()                              = default;
+
+  /**\brief  Allocate untracked memory in the hip space */
+  void* allocate(const size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  /**\brief  Deallocate untracked memory in the hip space */
+  void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
+
+ private:
+  template <class, class, class, class>
+  friend class LogicalMemorySpace;
+  void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      const Kokkos::Tools::SpaceHandle =
+                          Kokkos::Tools::make_space_handle(name())) const;
+  void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                       const size_t arg_alloc_size,
+                       const size_t arg_logical_size = 0,
+                       const Kokkos::Tools::SpaceHandle =
+                           Kokkos::Tools::make_space_handle(name())) const;
+
+ public:
+  /**\brief Return Name of the MemorySpace */
+  static constexpr const char* name() { return "HIP"; }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  /*--------------------------------*/
+  /** \brief  Error reporting for HostSpace attempt to access HIPSpace */
+  KOKKOS_DEPRECATED static void access_error();
+  KOKKOS_DEPRECATED static void access_error(const void* const);
+#endif
+
+ private:
+  int m_device;  ///< Which HIP device
+
+  friend class Kokkos::Impl::SharedAllocationRecord<
+      Kokkos::Experimental::HIPSpace, void>;
+};
+
+}  // namespace Experimental
+
+template <>
+struct Impl::is_hip_type_space<Experimental::HIPSpace> : public std::true_type {
+};
+
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Experimental {
+/** \brief  Host memory that is accessible to HIP execution space
+ *          through HIP's host-pinned memory allocation.
+ */
+class HIPHostPinnedSpace {
+ public:
+  //! Tag this class as a kokkos memory space
+  /** \brief  Memory is in HostSpace so use the HostSpace::execution_space */
+  using execution_space = HostSpace::execution_space;
+  using memory_space    = HIPHostPinnedSpace;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+  using size_type       = unsigned int;
+
+  /*--------------------------------*/
+
+  HIPHostPinnedSpace();
+  HIPHostPinnedSpace(HIPHostPinnedSpace&& rhs)      = default;
+  HIPHostPinnedSpace(const HIPHostPinnedSpace& rhs) = default;
+  HIPHostPinnedSpace& operator=(HIPHostPinnedSpace&& rhs) = default;
+  HIPHostPinnedSpace& operator=(const HIPHostPinnedSpace& rhs) = default;
+  ~HIPHostPinnedSpace()                                        = default;
+
+  /**\brief  Allocate untracked memory in the space */
+  void* allocate(const size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  /**\brief  Deallocate untracked memory in the space */
+  void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
+
+ private:
+  template <class, class, class, class>
+  friend class LogicalMemorySpace;
+  void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      const Kokkos::Tools::SpaceHandle =
+                          Kokkos::Tools::make_space_handle(name())) const;
+  void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                       const size_t arg_alloc_size,
+                       const size_t arg_logical_size = 0,
+                       const Kokkos::Tools::SpaceHandle =
+                           Kokkos::Tools::make_space_handle(name())) const;
+
+ public:
+  /**\brief Return Name of the MemorySpace */
+  static constexpr const char* name() { return "HIPHostPinned"; }
+
+  /*--------------------------------*/
+};
+}  // namespace Experimental
+
+template <>
+struct Impl::is_hip_type_space<Experimental::HIPHostPinnedSpace>
+    : public std::true_type {};
+
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Experimental {
+/** \brief  Memory that is accessible to HIP execution space
+ *          and host through HIP's memory page migration.
+ */
+class HIPManagedSpace {
+ public:
+  //! Tag this class as a kokkos memory space
+  /** \brief  Memory is unified between device and host via page migration,
+   *  and can therefore be used by both HostSpace::execution_space and
+   *  HIPSpace::execution_space.
+   */
+  using memory_space    = HIPManagedSpace;
+  using execution_space = Kokkos::Experimental::HIP;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+  using size_type       = unsigned int;
+
+  /*--------------------------------*/
+
+  HIPManagedSpace();
+  HIPManagedSpace(HIPManagedSpace&& rhs)      = default;
+  HIPManagedSpace(const HIPManagedSpace& rhs) = default;
+  HIPManagedSpace& operator=(HIPManagedSpace&& rhs) = default;
+  HIPManagedSpace& operator=(const HIPManagedSpace& rhs) = default;
+  ~HIPManagedSpace()                                     = default;
+
+  /**\brief  Allocate untracked memory in the space */
+  void* allocate(const size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  /**\brief  Deallocate untracked memory in the space */
+  void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
+
+ private:
+  int m_device;  ///< Which HIP device
+  template <class, class, class, class>
+  friend class LogicalMemorySpace;
+  void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      const Kokkos::Tools::SpaceHandle =
+                          Kokkos::Tools::make_space_handle(name())) const;
+  void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                       const size_t arg_alloc_size,
+                       const size_t arg_logical_size = 0,
+                       const Kokkos::Tools::SpaceHandle =
+                           Kokkos::Tools::make_space_handle(name())) const;
+
+ public:
+  /**\brief Return Name of the MemorySpace */
+  static constexpr const char* name() { return "HIPManaged"; }
+
+  /*--------------------------------*/
+};
+}  // namespace Experimental
+
+template <>
+struct Impl::is_hip_type_space<Experimental::HIPManagedSpace>
+    : public std::true_type {};
+
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+static_assert(
+    Kokkos::Impl::MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
+                                    Kokkos::Experimental::HIPSpace>::assignable,
+    "");
+
+//----------------------------------------
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace, Kokkos::Experimental::HIPSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace,
+                         Kokkos::Experimental::HIPHostPinnedSpace> {
+  // HostSpace::execution_space == HIPHostPinnedSpace::execution_space
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace,
+                         Kokkos::Experimental::HIPManagedSpace> {
+  // HostSpace::execution_space != HIPManagedSpace::execution_space
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HIPSpace, Kokkos::HostSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
+                         Kokkos::Experimental::HIPHostPinnedSpace> {
+  // HIPSpace::execution_space != HIPHostPinnedSpace::execution_space
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };  // HIPSpace::execution_space
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
+                         Kokkos::Experimental::HIPManagedSpace> {
+  // HIPSpace::execution_space == HIPManagedSpace::execution_space
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+// HIPHostPinnedSpace::execution_space == HostSpace::execution_space
+// HIPHostPinnedSpace accessible to both HIP and Host
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HIPHostPinnedSpace,
+                         Kokkos::HostSpace> {
+  enum : bool { assignable = false };  // Cannot access from HIP
+  enum : bool { accessible = true };   // HIPHostPinnedSpace::execution_space
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HIPHostPinnedSpace,
+                         Kokkos::Experimental::HIPSpace> {
+  enum : bool { assignable = false };  // Cannot access from Host
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HIPHostPinnedSpace,
+                         Kokkos::Experimental::HIPManagedSpace> {
+  enum : bool { assignable = false };  // different exec_space
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+// HIPManagedSpace::execution_space != HostSpace::execution_space
+// HIPManagedSpace accessible to both HIP and Host
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HIPManagedSpace,
+                         Kokkos::HostSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };  // HostSpace is not accessible from HIP
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HIPManagedSpace,
+                         Kokkos::Experimental::HIPSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HIPManagedSpace,
+                         Kokkos::Experimental::HIPHostPinnedSpace> {
+  enum : bool { assignable = false };  // different exec_space
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+};  // namespace Impl
+//----------------------------------------
+
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+void DeepCopyHIP(void* dst, const void* src, size_t n);
+void DeepCopyAsyncHIP(const Kokkos::Experimental::HIP& instance, void* dst,
+                      const void* src, size_t n);
+void DeepCopyAsyncHIP(void* dst, const void* src, size_t n);
+
+template <class MemSpace>
+struct DeepCopy<MemSpace, HostSpace, Kokkos::Experimental::HIP,
+                std::enable_if_t<is_hip_type_space<MemSpace>::value>> {
+  DeepCopy(void* dst, const void* src, size_t n) { DeepCopyHIP(dst, src, n); }
+  DeepCopy(const Kokkos::Experimental::HIP& instance, void* dst,
+           const void* src, size_t n) {
+    DeepCopyAsyncHIP(instance, dst, src, n);
+  }
+};
+
+template <class MemSpace>
+struct DeepCopy<HostSpace, MemSpace, Kokkos::Experimental::HIP,
+                std::enable_if_t<is_hip_type_space<MemSpace>::value>> {
+  DeepCopy(void* dst, const void* src, size_t n) { DeepCopyHIP(dst, src, n); }
+  DeepCopy(const Kokkos::Experimental::HIP& instance, void* dst,
+           const void* src, size_t n) {
+    DeepCopyAsyncHIP(instance, dst, src, n);
+  }
+};
+
+template <class MemSpace1, class MemSpace2>
+struct DeepCopy<MemSpace1, MemSpace2, Kokkos::Experimental::HIP,
+                std::enable_if_t<is_hip_type_space<MemSpace1>::value &&
+                                 is_hip_type_space<MemSpace2>::value>> {
+  DeepCopy(void* dst, const void* src, size_t n) { DeepCopyHIP(dst, src, n); }
+  DeepCopy(const Kokkos::Experimental::HIP& instance, void* dst,
+           const void* src, size_t n) {
+    DeepCopyAsyncHIP(instance, dst, src, n);
+  }
+};
+
+template <class MemSpace1, class MemSpace2, class ExecutionSpace>
+struct DeepCopy<
+    MemSpace1, MemSpace2, ExecutionSpace,
+    std::enable_if_t<
+        is_hip_type_space<MemSpace1>::value &&
+        is_hip_type_space<MemSpace2>::value &&
+        !std::is_same<ExecutionSpace, Kokkos::Experimental::HIP>::value>> {
+  inline DeepCopy(void* dst, const void* src, size_t n) {
+    DeepCopyHIP(dst, src, n);
+  }
+
+  inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+                  size_t n) {
+    exec.fence(fence_string());
+    DeepCopyAsyncHIP(dst, src, n);
+  }
+
+ private:
+  static const std::string& fence_string() {
+    static const std::string string =
+        std::string("Kokkos::Impl::DeepCopy<") + MemSpace1::name() + "Space, " +
+        MemSpace2::name() +
+        "Space, ExecutionSpace>::DeepCopy: fence before copy";
+    return string;
+  }
+};
+
+template <class MemSpace, class ExecutionSpace>
+struct DeepCopy<
+    MemSpace, HostSpace, ExecutionSpace,
+    std::enable_if_t<
+        is_hip_type_space<MemSpace>::value &&
+        !std::is_same<ExecutionSpace, Kokkos::Experimental::HIP>::value>> {
+  inline DeepCopy(void* dst, const void* src, size_t n) {
+    DeepCopyHIP(dst, src, n);
+  }
+
+  inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+                  size_t n) {
+    exec.fence(fence_string());
+    DeepCopyAsyncHIP(dst, src, n);
+  }
+
+ private:
+  static const std::string& fence_string() {
+    static const std::string string =
+        std::string("Kokkos::Impl::DeepCopy<") + MemSpace::name() +
+        "Space, HostSpace, ExecutionSpace>::DeepCopy: fence before copy";
+    return string;
+  }
+};
+
+template <class MemSpace, class ExecutionSpace>
+struct DeepCopy<
+    HostSpace, MemSpace, ExecutionSpace,
+    std::enable_if_t<
+        is_hip_type_space<MemSpace>::value &&
+        !std::is_same<ExecutionSpace, Kokkos::Experimental::HIP>::value>> {
+  inline DeepCopy(void* dst, const void* src, size_t n) {
+    DeepCopyHIP(dst, src, n);
+  }
+
+  inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+                  size_t n) {
+    exec.fence(fence_string());
+    DeepCopyAsyncHIP(dst, src, n);
+  }
+
+ private:
+  static const std::string& fence_string() {
+    static const std::string string =
+        std::string("Kokkos::Impl::DeepCopy<HostSpace, ") + MemSpace::name() +
+        "Space, ExecutionSpace>::DeepCopy: fence before copy";
+    return string;
+  }
+};
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+class SharedAllocationRecord<Kokkos::Experimental::HIPSpace, void>
+    : public HostInaccessibleSharedAllocationRecordCommon<
+          Kokkos::Experimental::HIPSpace> {
+ private:
+  friend class SharedAllocationRecordCommon<Kokkos::Experimental::HIPSpace>;
+  friend class HostInaccessibleSharedAllocationRecordCommon<
+      Kokkos::Experimental::HIPSpace>;
+  using base_t = HostInaccessibleSharedAllocationRecordCommon<
+      Kokkos::Experimental::HIPSpace>;
+  using RecordBase = SharedAllocationRecord<void, void>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+#ifdef KOKKOS_ENABLE_DEBUG
+  static RecordBase s_root_record;
+#endif
+
+  const Kokkos::Experimental::HIPSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord();
+
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /*exec*/,
+      const Kokkos::Experimental::HIPSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate)
+      : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
+                               arg_dealloc) {}
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::HIP& exec_space,
+      const Kokkos::Experimental::HIPSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::HIPSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+};
+
+template <>
+class SharedAllocationRecord<Kokkos::Experimental::HIPHostPinnedSpace, void>
+    : public SharedAllocationRecordCommon<
+          Kokkos::Experimental::HIPHostPinnedSpace> {
+ private:
+  friend class SharedAllocationRecordCommon<
+      Kokkos::Experimental::HIPHostPinnedSpace>;
+  using base_t =
+      SharedAllocationRecordCommon<Kokkos::Experimental::HIPHostPinnedSpace>;
+  using RecordBase = SharedAllocationRecord<void, void>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+#ifdef KOKKOS_ENABLE_DEBUG
+  static RecordBase s_root_record;
+#endif
+
+  const Kokkos::Experimental::HIPHostPinnedSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord();
+  SharedAllocationRecord() = default;
+
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /*exec_space*/,
+      const Kokkos::Experimental::HIPHostPinnedSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate)
+      : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
+                               arg_dealloc) {}
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::HIPHostPinnedSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+};
+
+template <>
+class SharedAllocationRecord<Kokkos::Experimental::HIPManagedSpace, void>
+    : public SharedAllocationRecordCommon<
+          Kokkos::Experimental::HIPManagedSpace> {
+ private:
+  friend class SharedAllocationRecordCommon<
+      Kokkos::Experimental::HIPManagedSpace>;
+  using base_t =
+      SharedAllocationRecordCommon<Kokkos::Experimental::HIPManagedSpace>;
+  using RecordBase = SharedAllocationRecord<void, void>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+#ifdef KOKKOS_ENABLE_DEBUG
+  static RecordBase s_root_record;
+#endif
+
+  const Kokkos::Experimental::HIPManagedSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord();
+  SharedAllocationRecord() = default;
+
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /*exec_space*/,
+      const Kokkos::Experimental::HIPManagedSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate)
+      : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
+                               arg_dealloc) {}
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::HIPManagedSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+};
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+class HIPInternal;
+}
+/// \class HIP
+/// \brief Kokkos execution space for AMD GPU devices using the HIP runtime.
+class HIP {
+ public:
+  //------------------------------------
+  //! \name Type declarations that all Kokkos devices must provide.
+  //@{
+
+  //! Tag this class as a kokkos execution space
+  using execution_space = HIP;
+  using memory_space    = HIPSpace;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+
+  using array_layout = LayoutLeft;
+  using size_type    = HIPSpace::size_type;
+
+  using scratch_memory_space = ScratchMemorySpace<HIP>;
+
+  HIP();
+  HIP(hipStream_t stream, bool manage_stream = false);
+
+  //@}
+  //------------------------------------
+  //! \name Functions that all Kokkos devices must implement.
+  //@{
+
+  KOKKOS_INLINE_FUNCTION static int in_parallel() {
+#if defined(__HIP_DEVICE_COMPILE__)
+    return true;
+#else
+    return false;
+#endif
+  }
+
+  /** \brief Wait until all dispatched functors complete.
+   *
+   * The parallel_for or parallel_reduce dispatch of a functor may return
+   * asynchronously, before the functor completes. This method does not return
+   * until all dispatched functors on this device have completed.
+   */
+  static void impl_static_fence(const std::string& name);
+
+  void fence(const std::string& name =
+                 "Kokkos::HIP::fence(): Unnamed Instance Fence") const;
+
+  hipStream_t hip_stream() const;
+
+  /// \brief Print configuration information to the given output stream.
+  void print_configuration(std::ostream& os, bool verbose = false) const;
+
+  /// \brief Free any resources being consumed by the device.
+  static void impl_finalize();
+
+  int hip_device() const;
+  static hipDeviceProp_t const& hip_device_prop();
+
+  /** \brief  Initialize the device. */
+  static void impl_initialize(InitializationSettings const&);
+
+  static int impl_is_initialized();
+
+  //  static size_type device_arch();
+
+  static size_type detect_device_count();
+
+  static int concurrency();
+  static const char* name();
+
+  inline Impl::HIPInternal* impl_internal_space_instance() const {
+    return m_space_instance.get();
+  }
+
+  uint32_t impl_instance_id() const noexcept;
+
+ private:
+  Kokkos::Impl::HostSharedPtr<Impl::HIPInternal> m_space_instance;
+};
+}  // namespace Experimental
+namespace Tools {
+namespace Experimental {
+template <>
+struct DeviceTypeTraits<Kokkos::Experimental::HIP> {
+  static constexpr DeviceType id = DeviceType::HIP;
+  static int device_id(const Kokkos::Experimental::HIP& exec) {
+    return exec.hip_device();
+  }
+};
+}  // namespace Experimental
+}  // namespace Tools
+
+namespace Impl {
+template <class DT, class... DP>
+struct ZeroMemset<Kokkos::Experimental::HIP, DT, DP...> {
+  ZeroMemset(const Kokkos::Experimental::HIP& exec_space,
+             const View<DT, DP...>& dst,
+             typename View<DT, DP...>::const_value_type&) {
+    KOKKOS_IMPL_HIP_SAFE_CALL(hipMemsetAsync(
+        dst.data(), 0,
+        dst.size() * sizeof(typename View<DT, DP...>::value_type),
+        exec_space.hip_stream()));
+  }
+
+  ZeroMemset(const View<DT, DP...>& dst,
+             typename View<DT, DP...>::const_value_type&) {
+    KOKKOS_IMPL_HIP_SAFE_CALL(
+        hipMemset(dst.data(), 0,
+                  dst.size() * sizeof(typename View<DT, DP...>::value_type)));
+  }
+};
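+
+// Descriptive note: the instance-taking overload above zeroes the View
+// asynchronously on that instance's stream via hipMemsetAsync, while the
+// overload without an instance uses the blocking hipMemset.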
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HIPSpace,
+                         Kokkos::Experimental::HIP::scratch_memory_space> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = false };
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif /* #if defined( KOKKOS_ENABLE_HIP ) */
+#endif /* KOKKOS_HIPSPACE_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_HPX.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_HPX.hpp
new file mode 100644 (file)
index 0000000..044e54f
--- /dev/null
@@ -0,0 +1,2395 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_HPX_HPP
+#define KOKKOS_HPX_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_HPX)
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_HostSpace.hpp>
+#include <cstddef>
+#include <iosfwd>
+
+#ifdef KOKKOS_ENABLE_HBWSPACE
+#include <Kokkos_HBWSpace.hpp>
+#endif
+
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_MemoryTraits.hpp>
+#include <Kokkos_Parallel.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <Kokkos_TaskScheduler.hpp>
+#include <impl/Kokkos_ConcurrentBitset.hpp>
+#include <impl/Kokkos_FunctorAnalysis.hpp>
+#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_TaskQueue.hpp>
+#include <impl/Kokkos_InitializationSettings.hpp>
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+#include <hpx/local/algorithm.hpp>
+#include <hpx/local/barrier.hpp>
+#include <hpx/local/condition_variable.hpp>
+#include <hpx/local/execution.hpp>
+#include <hpx/local/future.hpp>
+#include <hpx/local/init.hpp>
+#include <hpx/local/mutex.hpp>
+#include <hpx/local/runtime.hpp>
+#include <hpx/local/thread.hpp>
+
+#include <Kokkos_UniqueToken.hpp>
+
+#include <cassert>
+#include <functional>
+#include <iostream>
+#include <memory>
+#include <sstream>
+#include <type_traits>
+#include <vector>
+
+// There are currently two different implementations for the parallel dispatch
+// functions:
+//
+// - 0: The HPX way. Unfortunately, this currently comes with unnecessary
+//      overheads, so there is also
+// - 1: The manual way. This also uses for_loop, but only spawns one task per
+//      worker thread. This is significantly faster in most cases.
+//
+// In the long run, 0 should be the preferred implementation, but until HPX is
+// improved, 1 will be the default.
+#ifndef KOKKOS_HPX_IMPLEMENTATION
+#define KOKKOS_HPX_IMPLEMENTATION 1
+#endif
+
+#if (KOKKOS_HPX_IMPLEMENTATION < 0) || (KOKKOS_HPX_IMPLEMENTATION > 1)
+#error "You have chosen an invalid value for KOKKOS_HPX_IMPLEMENTATION"
+#endif
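+
+// Editorial note (not part of the Kokkos sources): the implementation can be
+// selected at compile time by defining the macro before this header is
+// compiled, e.g.
+//
+//   c++ -DKOKKOS_HPX_IMPLEMENTATION=0 ...   // force the pure-HPX dispatch
+//
+// Any value other than 0 or 1 triggers the #error above.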
+
+// [note 1]
+//
+// When using the asynchronous backend and independent instances, we explicitly
+// reset the shared data at the end of a parallel task (execute_task). We do
+// this to avoid circular references with shared pointers that would otherwise
+// never be released.
+//
+// The HPX instance holds shared data for the instance in a shared_ptr. One of
+// the pieces of shared data is the future that we use to sequence parallel
+// dispatches. When a parallel task is launched, a copy of the closure
+// (ParallelFor, ParallelReduce, etc.) is captured in the task. The closure
+// also holds the policy, the policy holds the HPX instance, the instance holds
+// the shared data (for use of buffers in the parallel task). When attaching a
+// continuation to a future, the continuation is stored in the future (shared
+// state). This means that there is a cycle future -> continuation -> closure
+// -> policy -> HPX -> shared data -> future. We break this by releasing the
+// shared data early, as (the pointer to) the shared data will not be used
+// anymore by the closure at the end of execute_task.
+//
+// We also mark the shared instance data as mutable so that we can reset it
+// from the const execute_task member function.
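+//
+// An editorial sketch (not part of the Kokkos sources) of the cycle above,
+// reduced to plain shared_ptr terms; Closure, SharedData, and run() are
+// hypothetical names:
+//
+//   #include <functional>
+//   #include <memory>
+//
+//   struct SharedData;
+//
+//   struct Closure {
+//     mutable std::shared_ptr<SharedData> data;  // closure -> shared data
+//     void run() const {
+//       // ... use the buffers held by *data ...
+//       data.reset();  // release early, as execute_task does
+//     }
+//   };
+//
+//   struct SharedData {
+//     std::function<void()> continuation;  // shared data -> closure: a cycle
+//   };
+//
+// Without the reset() at the end of run(), SharedData keeps the continuation
+// alive and the continuation keeps SharedData alive, so neither is released.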
+
+namespace Kokkos {
+namespace Impl {
+class thread_buffer {
+  static constexpr std::size_t m_cache_line_size = 64;
+
+  std::size_t m_num_threads;
+  std::size_t m_size_per_thread;
+  std::size_t m_size_total;
+  char *m_data;
+
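+  // Round size up to the next multiple of the cache-line size. Editorial
+  // example (not from the sources): with m_cache_line_size == 64, a requested
+  // size of 100 becomes 128, so each thread's slice starts on its own cache
+  // line and false sharing between worker threads is avoided.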
+  void pad_to_cache_line(std::size_t &size) {
+    size = ((size + m_cache_line_size - 1) / m_cache_line_size) *
+           m_cache_line_size;
+  }
+
+ public:
+  thread_buffer()
+      : m_num_threads(0),
+        m_size_per_thread(0),
+        m_size_total(0),
+        m_data(nullptr) {}
+  thread_buffer(const std::size_t num_threads,
+                const std::size_t size_per_thread) {
+    resize(num_threads, size_per_thread);
+  }
+  ~thread_buffer() { delete[] m_data; }
+
+  thread_buffer(const thread_buffer &) = delete;
+  thread_buffer(thread_buffer &&)      = delete;
+  thread_buffer &operator=(const thread_buffer &) = delete;
+  thread_buffer &operator=(thread_buffer &&)      = delete;
+
+  void resize(const std::size_t num_threads,
+              const std::size_t size_per_thread) {
+    m_num_threads     = num_threads;
+    m_size_per_thread = size_per_thread;
+
+    pad_to_cache_line(m_size_per_thread);
+
+    std::size_t size_total_new = m_num_threads * m_size_per_thread;
+
+    if (m_size_total < size_total_new) {
+      delete[] m_data;
+      m_data       = new char[size_total_new];
+      m_size_total = size_total_new;
+    }
+  }
+
+  char *get(std::size_t thread_num) {
+    assert(thread_num < m_num_threads);
+    if (m_data == nullptr) {
+      return nullptr;
+    }
+    return &m_data[thread_num * m_size_per_thread];
+  }
+
+  std::size_t size_per_thread() const noexcept { return m_size_per_thread; }
+  std::size_t size_total() const noexcept { return m_size_total; }
+};
+}  // namespace Impl
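+
+// Editorial usage sketch of thread_buffer (not from the Kokkos sources); the
+// sizes are arbitrary:
+//
+//   Kokkos::Impl::thread_buffer buf;
+//   buf.resize(/*num_threads=*/4, /*size_per_thread=*/100);
+//   char *slot = buf.get(2);        // start of worker thread 2's storage
+//   // buf.size_per_thread() == 128 here, after cache-line padding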
+
+namespace Experimental {
+class HPX {
+ public:
+  static constexpr uint32_t impl_default_instance_id() { return 1; }
+
+ private:
+  static bool m_hpx_initialized;
+  uint32_t m_instance_id = impl_default_instance_id();
+
+#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
+  static std::atomic<uint32_t> m_next_instance_id;
+
+ public:
+  enum class instance_mode { default_, independent };
+
+ private:
+  static uint32_t m_active_parallel_region_count;
+  static hpx::spinlock m_active_parallel_region_count_mutex;
+  static hpx::condition_variable_any m_active_parallel_region_count_cond;
+
+  struct instance_data {
+    instance_data() = default;
+    instance_data(hpx::shared_future<void> future) : m_future(future) {}
+    Kokkos::Impl::thread_buffer m_buffer;
+    hpx::shared_future<void> m_future = hpx::make_ready_future<void>();
+    hpx::spinlock m_future_mutex;
+  };
+
+  mutable std::shared_ptr<instance_data> m_independent_instance_data;
+  static instance_data m_default_instance_data;
+
+  std::reference_wrapper<Kokkos::Impl::thread_buffer> m_buffer;
+  std::reference_wrapper<hpx::shared_future<void>> m_future;
+  std::reference_wrapper<hpx::spinlock> m_future_mutex;
+#else
+  static Kokkos::Impl::thread_buffer m_default_buffer;
+#endif
+
+ public:
+  using execution_space      = HPX;
+  using memory_space         = HostSpace;
+  using device_type          = Kokkos::Device<execution_space, memory_space>;
+  using array_layout         = LayoutRight;
+  using size_type            = memory_space::size_type;
+  using scratch_memory_space = ScratchMemorySpace<HPX>;
+
+#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
+  HPX() noexcept
+      : m_instance_id(impl_default_instance_id()),
+        m_buffer(m_default_instance_data.m_buffer),
+        m_future(m_default_instance_data.m_future),
+        m_future_mutex(m_default_instance_data.m_future_mutex) {}
+
+  HPX(instance_mode mode)
+      : m_instance_id(mode == instance_mode::independent
+                          ? m_next_instance_id++
+                          : impl_default_instance_id()),
+        m_independent_instance_data(mode == instance_mode::independent
+                                        ? (new instance_data())
+                                        : nullptr),
+        m_buffer(mode == instance_mode::independent
+                     ? m_independent_instance_data->m_buffer
+                     : m_default_instance_data.m_buffer),
+        m_future(mode == instance_mode::independent
+                     ? m_independent_instance_data->m_future
+                     : m_default_instance_data.m_future),
+        m_future_mutex(mode == instance_mode::independent
+                           ? m_independent_instance_data->m_future_mutex
+                           : m_default_instance_data.m_future_mutex) {}
+
+  HPX(hpx::shared_future<void> future)
+      : m_instance_id(m_next_instance_id++),
+        m_independent_instance_data(new instance_data(future)),
+        m_buffer(m_independent_instance_data->m_buffer),
+        m_future(m_independent_instance_data->m_future),
+        m_future_mutex(m_independent_instance_data->m_future_mutex) {}
+
+  HPX(HPX &&other) = default;
+  HPX &operator=(HPX &&other) = default;
+  HPX(const HPX &other)       = default;
+  HPX &operator=(const HPX &other) = default;
+#else
+  HPX() noexcept {}
+#endif
+
+  void print_configuration(std::ostream &os, bool /*verbose*/ = false) const {
+    os << "HPX backend\n";
+    os << "HPX Execution Space:\n";
+    os << "  KOKKOS_ENABLE_HPX: yes\n";
+    os << "\nHPX Runtime Configuration:\n";
+  }
+  uint32_t impl_instance_id() const noexcept { return m_instance_id; }
+
+#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
+  static bool in_parallel(HPX const &instance = HPX()) noexcept {
+    return !instance.impl_get_future().is_ready();
+  }
+#else
+  static bool in_parallel(HPX const & = HPX()) noexcept { return false; }
+#endif
+
+#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
+  static void impl_decrement_active_parallel_region_count() {
+    std::unique_lock<hpx::spinlock> l(m_active_parallel_region_count_mutex);
+    if (--m_active_parallel_region_count == 0) {
+      l.unlock();
+      m_active_parallel_region_count_cond.notify_all();
+    }
+  }
+
+  static void impl_increment_active_parallel_region_count() {
+    std::unique_lock<hpx::spinlock> l(m_active_parallel_region_count_mutex);
+    ++m_active_parallel_region_count;
+  }
+#endif
+
+  void fence(
+      const std::string &name =
+          "Kokkos::Experimental::HPX::fence: Unnamed Instance Fence") const {
+    Kokkos::Tools::Experimental::Impl::profile_fence_event<
+        Kokkos::Experimental::HPX>(
+        name,
+        Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{
+            impl_instance_id()},
+        [&]() {
+#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
+          impl_get_future().wait();
+          // Reset the future to free variables that may have been captured in
+          // parallel regions.
+          impl_get_future() = hpx::make_ready_future<void>();
+#endif
+        });
+  }
+
+  static void impl_static_fence(const std::string &name) {
+    Kokkos::Tools::Experimental::Impl::profile_fence_event<
+        Kokkos::Experimental::HPX>(
+        name,
+        Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+            GlobalDeviceSynchronization,
+        [&]() {
+#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
+          std::unique_lock<hpx::spinlock> l(
+              m_active_parallel_region_count_mutex);
+          m_active_parallel_region_count_cond.wait(
+              l, [&]() { return m_active_parallel_region_count == 0; });
+          // Reset the future to free variables that may have been captured in
+          // parallel regions (however, we don't have access to futures from
+          // instances other than the default instance; those will only be
+          // released by fence).
+          HPX().impl_get_future() = hpx::make_ready_future<void>();
+#endif
+        });
+  }
+
+  static hpx::execution::parallel_executor impl_get_executor() {
+    return hpx::execution::parallel_executor();
+  }
+
+  static bool is_asynchronous(HPX const & = HPX()) noexcept {
+#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
+    return true;
+#else
+    return false;
+#endif
+  }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  static std::vector<HPX> partition(...) {
+    Kokkos::abort(
+        "Kokkos::Experimental::HPX::partition_master: can't partition an HPX "
+        "instance\n");
+    return std::vector<HPX>();
+  }
+
+  template <typename F>
+  KOKKOS_DEPRECATED static void partition_master(
+      F const &, int requested_num_partitions = 0, int = 0) {
+    if (requested_num_partitions > 1) {
+      Kokkos::abort(
+          "Kokkos::Experimental::HPX::partition_master: can't partition an "
+          "HPX instance\n");
+    }
+  }
+#endif
+
+  static int concurrency();
+  static void impl_initialize(InitializationSettings const &);
+  static bool impl_is_initialized() noexcept;
+  static void impl_finalize();
+
+  static int impl_thread_pool_size() noexcept {
+    hpx::runtime *rt = hpx::get_runtime_ptr();
+    if (rt == nullptr) {
+      return 0;
+    } else {
+      if (hpx::threads::get_self_ptr() == nullptr) {
+        return hpx::resource::get_thread_pool(0).get_os_thread_count();
+      } else {
+        return hpx::this_thread::get_pool()->get_os_thread_count();
+      }
+    }
+  }
+
+  static int impl_thread_pool_rank() noexcept {
+    hpx::runtime *rt = hpx::get_runtime_ptr();
+    if (rt == nullptr) {
+      return 0;
+    } else {
+      if (hpx::threads::get_self_ptr() == nullptr) {
+        return 0;
+      } else {
+        return hpx::this_thread::get_pool()->get_pool_index();
+      }
+    }
+  }
+
+  static int impl_thread_pool_size(int depth) {
+    if (depth == 0) {
+      return impl_thread_pool_size();
+    } else {
+      return 1;
+    }
+  }
+
+  static int impl_max_hardware_threads() noexcept {
+    return hpx::threads::hardware_concurrency();
+  }
+
+  static int impl_hardware_thread_id() noexcept {
+    return hpx::get_worker_thread_num();
+  }
+
+  Kokkos::Impl::thread_buffer &impl_get_buffer() const noexcept {
+#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
+    return m_buffer.get();
+#else
+    return m_default_buffer;
+#endif
+  }
+
+#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
+  hpx::shared_future<void> &impl_get_future() const noexcept {
+    return m_future;
+  }
+
+  hpx::spinlock &impl_get_future_mutex() const noexcept {
+    return m_future_mutex;
+  }
+#endif
+
+#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
+  struct KOKKOS_ATTRIBUTE_NODISCARD reset_on_exit_parallel {
+    HPX const &m_space;
+    reset_on_exit_parallel(HPX const &space) : m_space(space) {}
+    ~reset_on_exit_parallel() {
+      // See [note 1] for an explanation. m_independent_instance_data is
+      // marked mutable.
+      m_space.m_independent_instance_data.reset();
+
+      HPX::impl_decrement_active_parallel_region_count();
+    }
+  };
+
+  // This struct is identical to the above except it does not reset the shared
+  // data. It does, however, still decrement the parallel region count. It is
+  // meant for use in parallel regions which do not capture the execution space
+  // instance.
+  struct KOKKOS_ATTRIBUTE_NODISCARD reset_count_on_exit_parallel {
+    reset_count_on_exit_parallel() {}
+    ~reset_count_on_exit_parallel() {
+      HPX::impl_decrement_active_parallel_region_count();
+    }
+  };
+#else
+  struct KOKKOS_ATTRIBUTE_NODISCARD reset_on_exit_parallel {
+    reset_on_exit_parallel(HPX const &) {}
+    ~reset_on_exit_parallel() {}
+  };
+
+  struct KOKKOS_ATTRIBUTE_NODISCARD reset_count_on_exit_parallel {
+    reset_count_on_exit_parallel() {}
+    ~reset_count_on_exit_parallel() {}
+  };
+#endif
+
+  static constexpr const char *name() noexcept { return "HPX"; }
+};
+}  // namespace Experimental
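+
+// Editorial usage sketch (not from the sources): when
+// KOKKOS_ENABLE_HPX_ASYNC_DISPATCH is defined, an independent instance
+// sequences its own parallel dispatches and is synchronized with fence():
+//
+//   #include <Kokkos_Core.hpp>
+//
+//   void example(int n) {
+//     namespace KE = Kokkos::Experimental;
+//     KE::HPX instance(KE::HPX::instance_mode::independent);
+//     Kokkos::parallel_for(
+//         "example", Kokkos::RangePolicy<KE::HPX>(instance, 0, n),
+//         KOKKOS_LAMBDA(int /*i*/) { /* ... */ });
+//     instance.fence();  // waits only for work dispatched on this instance
+//   }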
+
+namespace Tools {
+namespace Experimental {
+template <>
+struct DeviceTypeTraits<Kokkos::Experimental::HPX> {
+  static constexpr DeviceType id = DeviceType::HPX;
+  static int device_id(const Kokkos::Experimental::HPX &) { return 0; }
+};
+}  // namespace Experimental
+}  // namespace Tools
+
+namespace Impl {
+
+#if defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH)
+template <typename Closure>
+inline void dispatch_execute_task(Closure *closure,
+                                  Kokkos::Experimental::HPX const &instance,
+                                  bool force_synchronous = false) {
+  Kokkos::Experimental::HPX::impl_increment_active_parallel_region_count();
+
+  Closure closure_copy = *closure;
+
+  {
+    std::unique_lock<hpx::spinlock> l(instance.impl_get_future_mutex());
+    hpx::util::ignore_lock(&instance.impl_get_future_mutex());
+    hpx::shared_future<void> &fut = instance.impl_get_future();
+
+    fut = fut.then(hpx::execution::parallel_executor(
+                       hpx::threads::thread_schedule_hint(0)),
+                   [closure_copy](hpx::shared_future<void> &&) {
+                     return closure_copy.execute_task();
+                   });
+  }
+
+  if (force_synchronous) {
+    instance.fence(
+        "Kokkos::Experimental::Impl::HPX::dispatch_execute_task: fence due to "
+        "forced syncronizations");
+  }
+}
+#else
+template <typename Closure>
+inline void dispatch_execute_task(Closure *closure,
+                                  Kokkos::Experimental::HPX const &,
+                                  bool = false) {
+  closure->execute_task();
+}
+#endif
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::HPX::memory_space,
+                         Kokkos::Experimental::HPX::scratch_memory_space> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = false };
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Experimental {
+template <>
+class UniqueToken<HPX, UniqueTokenScope::Instance> {
+ private:
+  using buffer_type = Kokkos::View<uint32_t *, Kokkos::HostSpace>;
+  int m_count;
+  buffer_type m_buffer_view;
+  uint32_t volatile *m_buffer;
+
+ public:
+  using execution_space = HPX;
+  using size_type       = int;
+
+  /// \brief create an object sized for the concurrency of the given instance
+  ///
+  /// This object should not be shared between instances
+  UniqueToken(execution_space const & = execution_space()) noexcept
+      : m_count(execution_space::impl_max_hardware_threads()),
+        m_buffer_view(buffer_type()),
+        m_buffer(nullptr) {}
+
+  UniqueToken(size_type max_size, execution_space const & = execution_space())
+      : m_count(max_size > execution_space::impl_max_hardware_threads()
+                    ? execution_space::impl_max_hardware_threads()
+                    : max_size),
+        m_buffer_view(
+            max_size > execution_space::impl_max_hardware_threads()
+                ? buffer_type()
+                : buffer_type("UniqueToken::m_buffer_view",
+                              ::Kokkos::Impl::concurrent_bitset::buffer_bound(
+                                  m_count))),
+        m_buffer(m_buffer_view.data()) {}
+
+  /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int size() const noexcept { return m_count; }
+
+  /// \brief acquire value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int acquire() const noexcept {
+    KOKKOS_IF_ON_HOST((
+        if (m_buffer == nullptr) {
+          return execution_space::impl_hardware_thread_id();
+        } else {
+          const ::Kokkos::pair<int, int> result =
+              ::Kokkos::Impl::concurrent_bitset::acquire_bounded(
+                  m_buffer, m_count, ::Kokkos::Impl::clock_tic() % m_count);
+
+          if (result.first < 0) {
+            ::Kokkos::abort(
+                "UniqueToken<HPX> failure to acquire tokens, no tokens "
+                "available");
+          }
+          return result.first;
+        }))
+
+    KOKKOS_IF_ON_DEVICE((return 0;))
+  }
+
+  /// \brief release a value acquired by acquire()
+  KOKKOS_INLINE_FUNCTION
+  void release(int i) const noexcept {
+    KOKKOS_IF_ON_HOST((if (m_buffer != nullptr) {
+      ::Kokkos::Impl::concurrent_bitset::release(m_buffer, i);
+    }))
+
+    KOKKOS_IF_ON_DEVICE(((void)i;))
+  }
+};
+
+template <>
+class UniqueToken<HPX, UniqueTokenScope::Global> {
+ public:
+  using execution_space = HPX;
+  using size_type       = int;
+  UniqueToken(execution_space const & = execution_space()) noexcept {}
+
+  // NOTE: Currently this assumes that there is no oversubscription.
+  // hpx::get_num_worker_threads can't be used directly because it may yield
+  // its task (problematic if called after hpx::get_worker_thread_num).
+  int size() const noexcept { return HPX::impl_max_hardware_threads(); }
+  int acquire() const noexcept { return HPX::impl_hardware_thread_id(); }
+  void release(int) const noexcept {}
+};
+}  // namespace Experimental
+}  // namespace Kokkos
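+
+// Editorial usage sketch (not from the sources): a UniqueToken hands each
+// concurrently running iteration an exclusive index, e.g. to address a
+// per-thread slot in a hypothetical scratch array of size token.size();
+// n is a hypothetical iteration count:
+//
+//   namespace KE = Kokkos::Experimental;
+//   KE::UniqueToken<KE::HPX, KE::UniqueTokenScope::Instance> token;
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<KE::HPX>(0, n), KOKKOS_LAMBDA(int /*i*/) {
+//         const int id = token.acquire();  // 0 <= id < token.size()
+//         // ... work on scratch[id] without racing other iterations ...
+//         token.release(id);
+//       });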
+
+namespace Kokkos {
+namespace Impl {
+
+struct HPXTeamMember {
+ public:
+  using execution_space = Kokkos::Experimental::HPX;
+  using scratch_memory_space =
+      Kokkos::ScratchMemorySpace<Kokkos::Experimental::HPX>;
+
+ private:
+  scratch_memory_space m_team_shared;
+
+  int m_league_size;
+  int m_league_rank;
+  int m_team_size;
+  int m_team_rank;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  const scratch_memory_space &team_shmem() const {
+    return m_team_shared.set_team_thread_mode(0, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space &team_scratch(const int) const {
+    return m_team_shared.set_team_thread_mode(0, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space &thread_scratch(const int) const {
+    return m_team_shared.set_team_thread_mode(0, team_size(), team_rank());
+  }
+
+  KOKKOS_INLINE_FUNCTION int league_rank() const noexcept {
+    return m_league_rank;
+  }
+
+  KOKKOS_INLINE_FUNCTION int league_size() const noexcept {
+    return m_league_size;
+  }
+
+  KOKKOS_INLINE_FUNCTION int team_rank() const noexcept { return m_team_rank; }
+  KOKKOS_INLINE_FUNCTION int team_size() const noexcept { return m_team_size; }
+
+  template <class... Properties>
+  constexpr KOKKOS_INLINE_FUNCTION HPXTeamMember(
+      const TeamPolicyInternal<Kokkos::Experimental::HPX, Properties...>
+          &policy,
+      const int team_rank, const int league_rank, void *scratch,
+      size_t scratch_size) noexcept
+      : m_team_shared(scratch, scratch_size, scratch, scratch_size),
+        m_league_size(policy.league_size()),
+        m_league_rank(league_rank),
+        m_team_size(policy.team_size()),
+        m_team_rank(team_rank) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void team_barrier() const {}
+
+  template <class ValueType>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(ValueType &, const int &) const {}
+
+  template <class Closure, class ValueType>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(const Closure &closure,
+                                             ValueType &value,
+                                             const int &) const {
+    closure(value);
+  }
+
+  template <class ValueType, class JoinOp>
+  KOKKOS_INLINE_FUNCTION ValueType team_reduce(const ValueType &value,
+                                               const JoinOp &) const {
+    return value;
+  }
+
+  template <class ReducerType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+  team_reduce(const ReducerType &) const {}
+
+  template <typename Type>
+  KOKKOS_INLINE_FUNCTION Type
+  team_scan(const Type &value, Type *const global_accum = nullptr) const {
+    if (global_accum) {
+      Kokkos::atomic_fetch_add(global_accum, value);
+    }
+
+    return 0;
+  }
+};
+
+template <class... Properties>
+class TeamPolicyInternal<Kokkos::Experimental::HPX, Properties...>
+    : public PolicyTraits<Properties...> {
+  int m_league_size;
+  int m_team_size;
+  std::size_t m_team_scratch_size[2];
+  std::size_t m_thread_scratch_size[2];
+  int m_chunk_size;
+
+ public:
+  using traits = PolicyTraits<Properties...>;
+
+  //! Tag this class as a kokkos execution policy
+  using execution_policy = TeamPolicyInternal;
+
+  using member_type = HPXTeamMember;
+
+  //! Execution space of this execution policy:
+  using execution_space = Kokkos::Experimental::HPX;
+
+  // NOTE: Max size is 1 for simplicity. In most cases more than 1 is not
+  // necessary on CPU. Implement later if there is a need.
+  template <class FunctorType>
+  inline static int team_size_max(const FunctorType &) {
+    return 1;
+  }
+
+  template <class FunctorType>
+  inline static int team_size_recommended(const FunctorType &) {
+    return 1;
+  }
+
+  template <class FunctorType>
+  inline static int team_size_recommended(const FunctorType &, const int &) {
+    return 1;
+  }
+
+  template <class FunctorType>
+  int team_size_max(const FunctorType &, const ParallelForTag &) const {
+    return 1;
+  }
+
+  template <class FunctorType>
+  int team_size_max(const FunctorType &, const ParallelReduceTag &) const {
+    return 1;
+  }
+
+  template <class FunctorType, class ReducerType>
+  int team_size_max(const FunctorType &, const ReducerType &,
+                    const ParallelReduceTag &) const {
+    return 1;
+  }
+
+  template <class FunctorType>
+  int team_size_recommended(const FunctorType &, const ParallelForTag &) const {
+    return 1;
+  }
+
+  template <class FunctorType>
+  int team_size_recommended(const FunctorType &,
+                            const ParallelReduceTag &) const {
+    return 1;
+  }
+
+  template <class FunctorType, class ReducerType>
+  int team_size_recommended(const FunctorType &, const ReducerType &,
+                            const ParallelReduceTag &) const {
+    return 1;
+  }
+
+  static int vector_length_max() { return 1; }
+
+  inline int impl_vector_length() noexcept { return 1; }
+  inline bool impl_auto_team_size() noexcept { return false; }
+  inline bool impl_auto_vector_length() noexcept { return false; }
+  inline void impl_set_vector_length(int) noexcept {}
+  inline void impl_set_team_size(int) noexcept {}
+
+ private:
+  inline void init(const int league_size_request, const int team_size_request) {
+    m_league_size           = league_size_request;
+    const int max_team_size = 1;  // TODO: Can't use team_size_max(...) because
+                                  // it requires a functor as argument.
+    m_team_size =
+        team_size_request > max_team_size ? max_team_size : team_size_request;
+
+    if (m_chunk_size > 0) {
+      if (!Impl::is_integral_power_of_two(m_chunk_size))
+        Kokkos::abort("TeamPolicy blocking granularity must be power of two");
+    } else {
+      int new_chunk_size = 1;
+      while (new_chunk_size * 4 * Kokkos::Experimental::HPX::concurrency() <
+             m_league_size) {
+        new_chunk_size *= 2;
+      }
+
+      if (new_chunk_size < 128) {
+        new_chunk_size = 1;
+        while ((new_chunk_size * Kokkos::Experimental::HPX::concurrency() <
+                m_league_size) &&
+               (new_chunk_size < 128))
+          new_chunk_size *= 2;
+      }
+
+      m_chunk_size = new_chunk_size;
+    }
+  }
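+
+  // Editorial worked example for the heuristic above (not from the sources):
+  // with concurrency() == 16 and league_size == 1000, the first loop stops at
+  // a chunk size of 16 (since 16 * 4 * 16 >= 1000); because 16 < 128, the
+  // second loop recomputes it with the weaker bound chunk * 16 < 1000 and a
+  // cap of 128, giving a final chunk size of 64.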
+
+ public:
+  inline int team_size() const { return m_team_size; }
+  inline int league_size() const { return m_league_size; }
+
+  size_t scratch_size(const int &level, int team_size_ = -1) const {
+    if (team_size_ < 0) {
+      team_size_ = m_team_size;
+    }
+    return m_team_scratch_size[level] +
+           team_size_ * m_thread_scratch_size[level];
+  }
+
+  inline static int scratch_size_max(int level) {
+    return (level == 0 ? 1024 * 32 :  // Roughly L1 size
+                20 * 1024 * 1024);    // Limit to keep compatibility with CUDA
+  }
+
+ public:
+  template <class ExecSpace, class... OtherProperties>
+  friend class TeamPolicyInternal;
+
+  const typename traits::execution_space &space() const {
+    static typename traits::execution_space m_space;
+    return m_space;
+  }
+
+  template <class... OtherProperties>
+  TeamPolicyInternal(const TeamPolicyInternal<Kokkos::Experimental::HPX,
+                                              OtherProperties...> &p) {
+    m_league_size            = p.m_league_size;
+    m_team_size              = p.m_team_size;
+    m_team_scratch_size[0]   = p.m_team_scratch_size[0];
+    m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
+    m_team_scratch_size[1]   = p.m_team_scratch_size[1];
+    m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
+    m_chunk_size             = p.m_chunk_size;
+  }
+
+  TeamPolicyInternal(const typename traits::execution_space &,
+                     int league_size_request, int team_size_request,
+                     int /* vector_length_request */ = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0) {
+    init(league_size_request, team_size_request);
+  }
+
+  TeamPolicyInternal(const typename traits::execution_space &,
+                     int league_size_request, const Kokkos::AUTO_t &,
+                     int /* vector_length_request */ = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0) {
+    init(league_size_request, 1);
+  }
+
+  TeamPolicyInternal(const typename traits::execution_space &,
+                     int league_size_request,
+                     const Kokkos::AUTO_t &, /* team_size_request */
+                     const Kokkos::AUTO_t & /* vector_length_request */)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0) {
+    init(league_size_request, 1);
+  }
+
+  TeamPolicyInternal(const typename traits::execution_space &,
+                     int league_size_request, int team_size_request,
+                     const Kokkos::AUTO_t & /* vector_length_request */
+                     )
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0) {
+    init(league_size_request, team_size_request);
+  }
+
+  TeamPolicyInternal(int league_size_request,
+                     const Kokkos::AUTO_t &, /* team_size_request */
+                     const Kokkos::AUTO_t & /* vector_length_request */)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0) {
+    init(league_size_request, 1);
+  }
+
+  TeamPolicyInternal(int league_size_request, int team_size_request,
+                     const Kokkos::AUTO_t & /* vector_length_request */
+                     )
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0) {
+    init(league_size_request, team_size_request);
+  }
+
+  TeamPolicyInternal(int league_size_request, int team_size_request,
+                     int /* vector_length_request */ = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0) {
+    init(league_size_request, team_size_request);
+  }
+
+  TeamPolicyInternal(int league_size_request, const Kokkos::AUTO_t &,
+                     int /* vector_length_request */ = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0) {
+    init(league_size_request, 1);
+  }
+
+  inline int chunk_size() const { return m_chunk_size; }
+
+  inline TeamPolicyInternal &set_chunk_size(
+      typename traits::index_type chunk_size_) {
+    m_chunk_size = chunk_size_;
+    return *this;
+  }
+
+  inline TeamPolicyInternal &set_scratch_size(const int &level,
+                                              const PerTeamValue &per_team) {
+    m_team_scratch_size[level] = per_team.value;
+    return *this;
+  }
+
+  inline TeamPolicyInternal &set_scratch_size(
+      const int &level, const PerThreadValue &per_thread) {
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+  inline TeamPolicyInternal &set_scratch_size(
+      const int &level, const PerTeamValue &per_team,
+      const PerThreadValue &per_thread) {
+    m_team_scratch_size[level]   = per_team.value;
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+};
+}  // namespace Impl
+}  // namespace Kokkos
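+
+// Editorial usage sketch (not from the sources): scratch sizes are attached
+// to a team policy per level, per team and/or per thread; league_size is a
+// hypothetical variable:
+//
+//   Kokkos::TeamPolicy<Kokkos::Experimental::HPX> policy(league_size,
+//                                                        Kokkos::AUTO);
+//   policy.set_scratch_size(0, Kokkos::PerTeam(1024), Kokkos::PerThread(64));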
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename Policy>
+typename Policy::member_type get_hpx_adjusted_chunk_size(Policy const &policy) {
+  const int concurrency = Kokkos::Experimental::HPX::concurrency();
+  const typename Policy::member_type n        = policy.end() - policy.begin();
+  typename Policy::member_type new_chunk_size = policy.chunk_size();
+
+  while (n >= 4 * concurrency * new_chunk_size) {
+    new_chunk_size *= 2;
+  }
+
+  return new_chunk_size;
+}
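+
+// Editorial worked example (not from the sources): with concurrency() == 16,
+// policy.chunk_size() == 1, and n == 1'000'000 iterations, the loop doubles
+// the chunk size while n >= 4 * 16 * chunk, stopping at 16384 -- roughly
+// n / (4 * concurrency), i.e. about four chunks per worker thread.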
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
+                  Kokkos::Experimental::HPX> {
+ private:
+  using Policy    = Kokkos::RangePolicy<Traits...>;
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  template <class TagType>
+  static std::enable_if_t<std::is_void<TagType>::value> execute_functor(
+      const FunctorType &functor, const Member i) {
+    functor(i);
+  }
+
+  template <class TagType>
+  static std::enable_if_t<!std::is_void<TagType>::value> execute_functor(
+      const FunctorType &functor, const Member i) {
+    const TagType t{};
+    functor(t, i);
+  }
+
+  template <class TagType>
+  static std::enable_if_t<std::is_void<TagType>::value> execute_functor_range(
+      const FunctorType &functor, const Member i_begin, const Member i_end) {
+    for (Member i = i_begin; i < i_end; ++i) {
+      functor(i);
+    }
+  }
+
+  template <class TagType>
+  static std::enable_if_t<!std::is_void<TagType>::value> execute_functor_range(
+      const FunctorType &functor, const Member i_begin, const Member i_end) {
+    const TagType t{};
+    for (Member i = i_begin; i < i_end; ++i) {
+      functor(t, i);
+    }
+  }
+
+ public:
+  void execute() const {
+    Kokkos::Impl::dispatch_execute_task(this, m_policy.space());
+  }
+
+  void execute_task() const {
+    // See [note 1] for an explanation.
+    Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
+        m_policy.space());
+
+    auto exec = Kokkos::Experimental::HPX::impl_get_executor();
+
+    using hpx::execution::par;
+    using hpx::execution::static_chunk_size;
+
+#if KOKKOS_HPX_IMPLEMENTATION == 0
+    using hpx::for_loop;
+
+    for_loop(par.on(exec).with(static_chunk_size(m_policy.chunk_size())),
+             m_policy.begin(), m_policy.end(), [this](const Member i) {
+               execute_functor<WorkTag>(m_functor, i);
+             });
+
+#elif KOKKOS_HPX_IMPLEMENTATION == 1
+    using hpx::for_loop_strided;
+
+    const Member chunk_size = get_hpx_adjusted_chunk_size(m_policy);
+
+    for_loop_strided(
+        par.on(exec), m_policy.begin(), m_policy.end(), chunk_size,
+        [this, chunk_size](const Member i_begin) {
+          const Member i_end = (std::min)(i_begin + chunk_size, m_policy.end());
+          execute_functor_range<WorkTag>(m_functor, i_begin, i_end);
+        });
+#endif
+  }
+
+  inline ParallelFor(const FunctorType &arg_functor, Policy arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+                  Kokkos::Experimental::HPX> {
+ private:
+  using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+  using Policy        = typename MDRangePolicy::impl_range_policy;
+  using WorkTag       = typename MDRangePolicy::work_tag;
+  using WorkRange     = typename Policy::WorkRange;
+  using Member        = typename Policy::member_type;
+  using iterate_type =
+      typename Kokkos::Impl::HostIterateTile<MDRangePolicy, FunctorType,
+                                             WorkTag, void>;
+
+  const FunctorType m_functor;
+  const MDRangePolicy m_mdr_policy;
+  const Policy m_policy;
+
+ public:
+  void execute() const { dispatch_execute_task(this, m_mdr_policy.space()); }
+
+  inline void execute_task() const {
+    // See [note 1] for an explanation.
+    Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
+        m_mdr_policy.space());
+
+    auto exec = Kokkos::Experimental::HPX::impl_get_executor();
+
+    using hpx::execution::par;
+    using hpx::execution::static_chunk_size;
+
+#if KOKKOS_HPX_IMPLEMENTATION == 0
+    using hpx::for_loop;
+
+    for_loop(par.on(exec).with(
+                 static_chunk_size(get_hpx_adjusted_chunk_size(m_policy))),
+             m_policy.begin(), m_policy.end(), [this](const Member i) {
+               iterate_type(m_mdr_policy, m_functor)(i);
+             });
+
+#elif KOKKOS_HPX_IMPLEMENTATION == 1
+    using hpx::for_loop_strided;
+
+    const Member chunk_size = get_hpx_adjusted_chunk_size(m_policy);
+
+    for_loop_strided(par.on(exec), m_policy.begin(), m_policy.end(), chunk_size,
+                     [this, chunk_size](const Member i_begin) {
+                       const Member i_end =
+                           (std::min)(i_begin + chunk_size, m_policy.end());
+                       for (Member i = i_begin; i < i_end; ++i) {
+                         iterate_type(m_mdr_policy, m_functor)(i);
+                       }
+                     });
+#endif
+  }
+
+  inline ParallelFor(const FunctorType &arg_functor, MDRangePolicy arg_policy)
+      : m_functor(arg_functor),
+        m_mdr_policy(arg_policy),
+        m_policy(Policy(0, arg_policy.m_num_tiles).set_chunk_size(1)) {}
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy &, const Functor &) {
+    /**
+     * 1024 here is just our guess for a reasonable max tile size,
+     * it isn't a hardware constraint. If people see a use for larger
+     * tile size products, we're happy to change this.
+     */
+    return 1024;
+  }
+};
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
+                     Kokkos::Experimental::HPX> {
+ private:
+  using Policy    = Kokkos::RangePolicy<Traits...>;
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
+  using value_type     = typename Analysis::value_type;
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+
+  bool m_force_synchronous;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value> execute_functor(
+      const FunctorType &functor, const Member i, reference_type update) {
+    functor(i, update);
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value> execute_functor(
+      const FunctorType &functor, const Member i, reference_type update) {
+    const TagType t{};
+    functor(t, i, update);
+  }
+
+  template <class TagType>
+  inline std::enable_if_t<std::is_void<TagType>::value> execute_functor_range(
+      reference_type update, const Member i_begin, const Member i_end) const {
+    for (Member i = i_begin; i < i_end; ++i) {
+      m_functor(i, update);
+    }
+  }
+
+  template <class TagType>
+  inline std::enable_if_t<!std::is_void<TagType>::value> execute_functor_range(
+      reference_type update, const Member i_begin, const Member i_end) const {
+    const TagType t{};
+
+    for (Member i = i_begin; i < i_end; ++i) {
+      m_functor(t, i, update);
+    }
+  }
+
+  class value_type_wrapper {
+   private:
+    std::size_t m_value_size;
+    char *m_value_buffer;
+
+   public:
+    value_type_wrapper() : m_value_size(0), m_value_buffer(nullptr) {}
+
+    value_type_wrapper(const std::size_t value_size)
+        : m_value_size(value_size), m_value_buffer(new char[m_value_size]) {}
+
+    value_type_wrapper(const value_type_wrapper &other)
+        : m_value_size(0), m_value_buffer(nullptr) {
+      if (this != &other) {
+        m_value_buffer = new char[other.m_value_size];
+        m_value_size   = other.m_value_size;
+
+        std::copy(other.m_value_buffer, other.m_value_buffer + m_value_size,
+                  m_value_buffer);
+      }
+    }
+
+    ~value_type_wrapper() { delete[] m_value_buffer; }
+
+    value_type_wrapper(value_type_wrapper &&other)
+        : m_value_size(0), m_value_buffer(nullptr) {
+      if (this != &other) {
+        m_value_buffer = other.m_value_buffer;
+        m_value_size   = other.m_value_size;
+
+        other.m_value_buffer = nullptr;
+        other.m_value_size   = 0;
+      }
+    }
+
+    value_type_wrapper &operator=(const value_type_wrapper &other) {
+      if (this != &other) {
+        delete[] m_value_buffer;
+        m_value_buffer = new char[other.m_value_size];
+        m_value_size   = other.m_value_size;
+
+        std::copy(other.m_value_buffer, other.m_value_buffer + m_value_size,
+                  m_value_buffer);
+      }
+
+      return *this;
+    }
+
+    value_type_wrapper &operator=(value_type_wrapper &&other) {
+      if (this != &other) {
+        delete[] m_value_buffer;
+        m_value_buffer = other.m_value_buffer;
+        m_value_size   = other.m_value_size;
+
+        other.m_value_buffer = nullptr;
+        other.m_value_size   = 0;
+      }
+
+      return *this;
+    }
+
+    pointer_type pointer() const {
+      return reinterpret_cast<pointer_type>(m_value_buffer);
+    }
+
+    reference_type reference() const {
+      return Analysis::Reducer::reference(
+          reinterpret_cast<pointer_type>(m_value_buffer));
+    }
+  };
+
+ public:
+  void execute() const {
+    if (m_policy.end() <= m_policy.begin()) {
+      if (m_result_ptr) {
+        typename Analysis::Reducer final_reducer(
+            &ReducerConditional::select(m_functor, m_reducer));
+
+        final_reducer.init(m_result_ptr);
+        final_reducer.final(m_result_ptr);
+      }
+      return;
+    }
+    dispatch_execute_task(this, m_policy.space(), m_force_synchronous);
+  }
+
+  inline void execute_task() const {
+    // See [note 1] for an explanation.
+    Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
+        m_policy.space());
+
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    const std::size_t value_size =
+        Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
+
+    auto exec = Kokkos::Experimental::HPX::impl_get_executor();
+
+    using hpx::for_loop;
+    using hpx::execution::par;
+    using hpx::execution::static_chunk_size;
+
+#if KOKKOS_HPX_IMPLEMENTATION == 0
+    // NOTE: This version makes the most use of HPX functionality, but
+    // requires the struct value_type_wrapper to handle different
+    // reference_types. It is also significantly slower than the version
+    // below due to not reusing the buffer used by other functions.
+    using hpx::parallel::reduction;
+
+    value_type_wrapper final_value(value_size);
+    value_type_wrapper identity(value_size);
+
+    final_reducer.init(final_value.pointer());
+    final_reducer.init(identity.pointer());
+
+    for_loop(par.on(exec).with(
+                 static_chunk_size(get_hpx_adjusted_chunk_size(m_policy))),
+             m_policy.begin(), m_policy.end(),
+             reduction(final_value, identity,
+                       [final_reducer](
+                           value_type_wrapper &a,
+                           value_type_wrapper &b) -> value_type_wrapper & {
+                         final_reducer.join(a.pointer(), b.pointer());
+                         return a;
+                       }),
+             [this](Member i, value_type_wrapper &update) {
+               execute_functor<WorkTag>(m_functor, i, update.reference());
+             });
+
+    pointer_type final_value_ptr = final_value.pointer();
+
+#elif KOKKOS_HPX_IMPLEMENTATION == 1
+    using hpx::for_loop_strided;
+
+    const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
+
+    thread_buffer &buffer = m_policy.space().impl_get_buffer();
+    buffer.resize(num_worker_threads, value_size);
+
+    for_loop(
+        par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
+        [&buffer, final_reducer](const int t) noexcept {
+          final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
+        });
+
+    const Member chunk_size = get_hpx_adjusted_chunk_size(m_policy);
+
+    for_loop_strided(
+        par.on(exec), m_policy.begin(), m_policy.end(), chunk_size,
+        [this, &buffer, chunk_size](const Member i_begin) {
+          reference_type update = Analysis::Reducer::reference(
+              reinterpret_cast<pointer_type>(buffer.get(
+                  Kokkos::Experimental::HPX::impl_hardware_thread_id())));
+          const Member i_end = (std::min)(i_begin + chunk_size, m_policy.end());
+          execute_functor_range<WorkTag>(update, i_begin, i_end);
+        });
+
+    for (int i = 1; i < num_worker_threads; ++i) {
+      final_reducer.join(reinterpret_cast<pointer_type>(buffer.get(0)),
+                         reinterpret_cast<pointer_type>(buffer.get(i)));
+    }
+
+    pointer_type final_value_ptr =
+        reinterpret_cast<pointer_type>(buffer.get(0));
+#endif
+
+    final_reducer.final(final_value_ptr);
+
+    if (m_result_ptr != nullptr) {
+      const int n = Analysis::value_count(
+          ReducerConditional::select(m_functor, m_reducer));
+
+      for (int j = 0; j < n; ++j) {
+        m_result_ptr[j] = final_value_ptr[j];
+      }
+    }
+  }
+
+  template <class ViewType>
+  inline ParallelReduce(
+      const FunctorType &arg_functor, Policy arg_policy,
+      const ViewType &arg_view,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value &&
+                           !Kokkos::is_reducer<ReducerType>::value,
+                       void *> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_view.data()),
+        m_force_synchronous(!arg_view.impl_track().has_record()) {}
+
+  inline ParallelReduce(const FunctorType &arg_functor, Policy arg_policy,
+                        const ReducerType &reducer)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_force_synchronous(!reducer.view().impl_track().has_record()) {}
+};
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
+                     Kokkos::Experimental::HPX> {
+ private:
+  using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+  using Policy        = typename MDRangePolicy::impl_range_policy;
+  using WorkTag       = typename MDRangePolicy::work_tag;
+  using WorkRange     = typename Policy::WorkRange;
+  using Member        = typename Policy::member_type;
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using Analysis       = FunctorAnalysis<FunctorPatternInterface::REDUCE,
+                                   MDRangePolicy, ReducerTypeFwd>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using value_type     = typename Analysis::value_type;
+  using reference_type = typename Analysis::reference_type;
+  using iterate_type =
+      typename Kokkos::Impl::HostIterateTile<MDRangePolicy, FunctorType,
+                                             WorkTag, reference_type>;
+
+  const FunctorType m_functor;
+  const MDRangePolicy m_mdr_policy;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+
+  bool m_force_synchronous;
+
+ public:
+  void execute() const {
+    dispatch_execute_task(this, m_mdr_policy.space(), m_force_synchronous);
+  }
+
+  inline void execute_task() const {
+    // See [note 1] for an explanation.
+    Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
+        m_mdr_policy.space());
+
+    const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
+    const std::size_t value_size =
+        Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
+
+    thread_buffer &buffer = m_mdr_policy.space().impl_get_buffer();
+    buffer.resize(num_worker_threads, value_size);
+
+    using hpx::for_loop;
+    using hpx::execution::par;
+    using hpx::execution::static_chunk_size;
+
+    auto exec = Kokkos::Experimental::HPX::impl_get_executor();
+
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+#if KOKKOS_HPX_IMPLEMENTATION == 0
+
+    for_loop(
+        par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
+        [&buffer, final_reducer](std::size_t t) {
+          final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
+        });
+
+    for_loop(par.on(exec).with(
+                 static_chunk_size(get_hpx_adjusted_chunk_size(m_policy))),
+             m_policy.begin(), m_policy.end(), [this, &buffer](const Member i) {
+               reference_type update = Analysis::Reducer::reference(
+                   reinterpret_cast<pointer_type>(buffer.get(
+                       Kokkos::Experimental::HPX::impl_hardware_thread_id())));
+               iterate_type(m_mdr_policy, m_functor, update)(i);
+             });
+
+#elif KOKKOS_HPX_IMPLEMENTATION == 1
+    using hpx::for_loop_strided;
+
+    for_loop(
+        par.on(exec).with(static_chunk_size(1)), std::size_t(0),
+        num_worker_threads, [&buffer, final_reducer](const std::size_t t) {
+          final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
+        });
+
+    const Member chunk_size = get_hpx_adjusted_chunk_size(m_policy);
+
+    for_loop_strided(
+        par.on(exec), m_policy.begin(), m_policy.end(), chunk_size,
+        [this, &buffer, chunk_size](const Member i_begin) {
+          reference_type update = Analysis::Reducer::reference(
+              reinterpret_cast<pointer_type>(buffer.get(
+                  Kokkos::Experimental::HPX::impl_hardware_thread_id())));
+          const Member i_end = (std::min)(i_begin + chunk_size, m_policy.end());
+
+          for (Member i = i_begin; i < i_end; ++i) {
+            iterate_type(m_mdr_policy, m_functor, update)(i);
+          }
+        });
+#endif
+
+    for (int i = 1; i < num_worker_threads; ++i) {
+      final_reducer.join(reinterpret_cast<pointer_type>(buffer.get(0)),
+                         reinterpret_cast<pointer_type>(buffer.get(i)));
+    }
+
+    final_reducer.final(reinterpret_cast<pointer_type>(buffer.get(0)));
+
+    if (m_result_ptr != nullptr) {
+      const int n = Analysis::value_count(
+          ReducerConditional::select(m_functor, m_reducer));
+
+      for (int j = 0; j < n; ++j) {
+        m_result_ptr[j] = reinterpret_cast<pointer_type>(buffer.get(0))[j];
+      }
+    }
+  }
+
+  template <class ViewType>
+  inline ParallelReduce(
+      const FunctorType &arg_functor, MDRangePolicy arg_policy,
+      const ViewType &arg_view,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value &&
+                           !Kokkos::is_reducer<ReducerType>::value,
+                       void *> = nullptr)
+      : m_functor(arg_functor),
+        m_mdr_policy(arg_policy),
+        m_policy(Policy(0, arg_policy.m_num_tiles).set_chunk_size(1)),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_view.data()),
+        m_force_synchronous(!arg_view.impl_track().has_record()) {}
+
+  inline ParallelReduce(const FunctorType &arg_functor,
+                        MDRangePolicy arg_policy, const ReducerType &reducer)
+      : m_functor(arg_functor),
+        m_mdr_policy(arg_policy),
+        m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_force_synchronous(!reducer.view().impl_track().has_record()) {}
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy &, const Functor &) {
+    /**
+     * 1024 here is just our guess for a reasonable max tile size,
+     * it isn't a hardware constraint. If people see a use for larger
+     * tile size products, we're happy to change this.
+     */
+    return 1024;
+  }
+};
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+                   Kokkos::Experimental::HPX> {
+ private:
+  using Policy    = Kokkos::RangePolicy<Traits...>;
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+  using value_type     = typename Analysis::value_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value>
+  execute_functor_range(const FunctorType &functor, const Member i_begin,
+                        const Member i_end, reference_type update,
+                        const bool final) {
+    for (Member i = i_begin; i < i_end; ++i) {
+      functor(i, update, final);
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value>
+  execute_functor_range(const FunctorType &functor, const Member i_begin,
+                        const Member i_end, reference_type update,
+                        const bool final) {
+    const TagType t{};
+    for (Member i = i_begin; i < i_end; ++i) {
+      functor(t, i, update, final);
+    }
+  }
+
+ public:
+  void execute() const { dispatch_execute_task(this, m_policy.space()); }
+
+  inline void execute_task() const {
+    // See [note 1] for an explanation.
+    Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
+        m_policy.space());
+
+    const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
+    const int value_count        = Analysis::value_count(m_functor);
+    const std::size_t value_size = Analysis::value_size(m_functor);
+
+    thread_buffer &buffer = m_policy.space().impl_get_buffer();
+    buffer.resize(num_worker_threads, 2 * value_size);
+
+    using hpx::barrier;
+    using hpx::for_loop;
+    using hpx::execution::par;
+    using hpx::execution::static_chunk_size;
+
+    barrier<> bar(num_worker_threads);
+    auto exec = Kokkos::Experimental::HPX::impl_get_executor();
+
+    typename Analysis::Reducer final_reducer(&m_functor);
+
+    for_loop(
+        par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
+        [this, &bar, &buffer, num_worker_threads, value_count, value_size,
+         final_reducer](int t) {
+          reference_type update_sum =
+              final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
+
+          const WorkRange range(m_policy, t, num_worker_threads);
+          execute_functor_range<WorkTag>(m_functor, range.begin(), range.end(),
+                                         update_sum, false);
+
+          bar.arrive_and_wait();
+
+          if (t == 0) {
+            final_reducer.init(
+                reinterpret_cast<pointer_type>(buffer.get(0) + value_size));
+
+            for (int i = 1; i < num_worker_threads; ++i) {
+              pointer_type ptr_1_prev =
+                  reinterpret_cast<pointer_type>(buffer.get(i - 1));
+              pointer_type ptr_2_prev = reinterpret_cast<pointer_type>(
+                  buffer.get(i - 1) + value_size);
+              pointer_type ptr_2 =
+                  reinterpret_cast<pointer_type>(buffer.get(i) + value_size);
+
+              for (int j = 0; j < value_count; ++j) {
+                ptr_2[j] = ptr_2_prev[j];
+              }
+
+              final_reducer.join(ptr_2, ptr_1_prev);
+            }
+          }
+
+          bar.arrive_and_wait();
+
+          reference_type update_base = Analysis::Reducer::reference(
+              reinterpret_cast<pointer_type>(buffer.get(t) + value_size));
+
+          execute_functor_range<WorkTag>(m_functor, range.begin(), range.end(),
+                                         update_base, true);
+        });
+  }
+
+  inline ParallelScan(const FunctorType &arg_functor, const Policy &arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+};
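+
+// Usage sketch (illustrative; hypothetical helper name): the specialization
+// above backs an ordinary parallel_scan on the HPX space. Pass one gathers
+// per-thread partials (final == false), rank 0 stitches them together, and
+// pass two writes the exclusive prefix sum (final == true).
+inline void hpx_scan_usage_sketch(const int *in, int *out, const int n) {
+  Kokkos::parallel_scan(
+      "prefix_sum", Kokkos::RangePolicy<Kokkos::Experimental::HPX>(0, n),
+      [=](const int i, int &update, const bool final) {
+        if (final) out[i] = update;  // exclusive: write before accumulating
+        update += in[i];
+      });
+}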
+
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+                            ReturnType, Kokkos::Experimental::HPX> {
+ private:
+  using Policy    = Kokkos::RangePolicy<Traits...>;
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+  using value_type     = typename Analysis::value_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  ReturnType &m_returnvalue;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value>
+  execute_functor_range(const FunctorType &functor, const Member i_begin,
+                        const Member i_end, reference_type update,
+                        const bool final) {
+    for (Member i = i_begin; i < i_end; ++i) {
+      functor(i, update, final);
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value>
+  execute_functor_range(const FunctorType &functor, const Member i_begin,
+                        const Member i_end, reference_type update,
+                        const bool final) {
+    const TagType t{};
+    for (Member i = i_begin; i < i_end; ++i) {
+      functor(t, i, update, final);
+    }
+  }
+
+ public:
+  void execute() const { dispatch_execute_task(this, m_policy.space()); }
+
+  inline void execute_task() const {
+    // See [note 1] for an explanation.
+    Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
+        m_policy.space());
+
+    const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
+    const int value_count        = Analysis::value_count(m_functor);
+    const std::size_t value_size = Analysis::value_size(m_functor);
+
+    thread_buffer &buffer = m_policy.space().impl_get_buffer();
+    buffer.resize(num_worker_threads, 2 * value_size);
+
+    using hpx::barrier;
+    using hpx::for_loop;
+    using hpx::execution::par;
+    using hpx::execution::static_chunk_size;
+
+    barrier<> bar(num_worker_threads);
+    auto exec = Kokkos::Experimental::HPX::impl_get_executor();
+
+    typename Analysis::Reducer final_reducer(&m_functor);
+
+    for_loop(
+        par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
+        [this, &bar, &buffer, num_worker_threads, value_count, value_size,
+         final_reducer](int t) {
+          reference_type update_sum =
+              final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
+
+          const WorkRange range(m_policy, t, num_worker_threads);
+          execute_functor_range<WorkTag>(m_functor, range.begin(), range.end(),
+                                         update_sum, false);
+
+          bar.arrive_and_wait();
+
+          if (t == 0) {
+            final_reducer.init(
+                reinterpret_cast<pointer_type>(buffer.get(0) + value_size));
+
+            for (int i = 1; i < num_worker_threads; ++i) {
+              pointer_type ptr_1_prev =
+                  reinterpret_cast<pointer_type>(buffer.get(i - 1));
+              pointer_type ptr_2_prev = reinterpret_cast<pointer_type>(
+                  buffer.get(i - 1) + value_size);
+              pointer_type ptr_2 =
+                  reinterpret_cast<pointer_type>(buffer.get(i) + value_size);
+
+              for (int j = 0; j < value_count; ++j) {
+                ptr_2[j] = ptr_2_prev[j];
+              }
+
+              final_reducer.join(ptr_2, ptr_1_prev);
+            }
+          }
+
+          bar.arrive_and_wait();
+
+          reference_type update_base = Analysis::Reducer::reference(
+              reinterpret_cast<pointer_type>(buffer.get(t) + value_size));
+
+          execute_functor_range<WorkTag>(m_functor, range.begin(), range.end(),
+                                         update_base, true);
+
+          if (t == num_worker_threads - 1) {
+            m_returnvalue = update_base;
+          }
+        });
+  }
+
+  inline ParallelScanWithTotal(const FunctorType &arg_functor,
+                               const Policy &arg_policy,
+                               ReturnType &arg_returnvalue)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_returnvalue(arg_returnvalue) {}
+};
+}  // namespace Impl
+}  // namespace Kokkos
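+
+// Usage sketch (illustrative; hypothetical helper name): ParallelScanWithTotal
+// additionally hands back the grand total; in the specialization above the
+// last worker thread stores its final value into m_returnvalue.
+inline int hpx_scan_with_total_sketch(const int *in, int *out, const int n) {
+  int total = 0;
+  Kokkos::parallel_scan(
+      "prefix_sum_total", Kokkos::RangePolicy<Kokkos::Experimental::HPX>(0, n),
+      [=](const int i, int &update, const bool final) {
+        if (final) out[i] = update;
+        update += in[i];
+      },
+      total);
+  return total;  // sum over all n entries of 'in'
+}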
+
+namespace Kokkos {
+namespace Impl {
+template <class FunctorType, class... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                  Kokkos::Experimental::HPX> {
+ private:
+  using Policy  = TeamPolicyInternal<Kokkos::Experimental::HPX, Properties...>;
+  using WorkTag = typename Policy::work_tag;
+  using Member  = typename Policy::member_type;
+  using memory_space = Kokkos::HostSpace;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const int m_league;
+  const std::size_t m_shared;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value> execute_functor(
+      const FunctorType &functor, const Policy &policy, const int league_rank,
+      char *local_buffer, const std::size_t local_buffer_size) {
+    functor(Member(policy, 0, league_rank, local_buffer, local_buffer_size));
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value> execute_functor(
+      const FunctorType &functor, const Policy &policy, const int league_rank,
+      char *local_buffer, const std::size_t local_buffer_size) {
+    const TagType t{};
+    functor(t, Member(policy, 0, league_rank, local_buffer, local_buffer_size));
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value>
+  execute_functor_range(const FunctorType &functor, const Policy &policy,
+                        const int league_rank_begin, const int league_rank_end,
+                        char *local_buffer,
+                        const std::size_t local_buffer_size) {
+    for (int league_rank = league_rank_begin; league_rank < league_rank_end;
+         ++league_rank) {
+      functor(Member(policy, 0, league_rank, local_buffer, local_buffer_size));
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value>
+  execute_functor_range(const FunctorType &functor, const Policy &policy,
+                        const int league_rank_begin, const int league_rank_end,
+                        char *local_buffer,
+                        const std::size_t local_buffer_size) {
+    const TagType t{};
+    for (int league_rank = league_rank_begin; league_rank < league_rank_end;
+         ++league_rank) {
+      functor(t,
+              Member(policy, 0, league_rank, local_buffer, local_buffer_size));
+    }
+  }
+
+ public:
+  void execute() const { dispatch_execute_task(this, m_policy.space()); }
+
+  inline void execute_task() const {
+    // See [note 1] for an explanation.
+    Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
+        m_policy.space());
+
+    const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
+
+    thread_buffer &buffer = m_policy.space().impl_get_buffer();
+    buffer.resize(num_worker_threads, m_shared);
+
+    auto exec = Kokkos::Experimental::HPX::impl_get_executor();
+
+    using hpx::execution::par;
+    using hpx::execution::static_chunk_size;
+
+#if KOKKOS_HPX_IMPLEMENTATION == 0
+    using hpx::for_loop;
+
+    for_loop(
+        par.on(exec).with(static_chunk_size(m_policy.chunk_size())), 0,
+        m_policy.league_size(), [this, &buffer](const int league_rank) {
+          execute_functor<WorkTag>(
+              m_functor, m_policy, league_rank,
+              buffer.get(Kokkos::Experimental::HPX::impl_hardware_thread_id()),
+              m_shared);
+        });
+
+#elif KOKKOS_HPX_IMPLEMENTATION == 1
+    using hpx::for_loop_strided;
+
+    for_loop_strided(
+        par.on(exec), 0, m_policy.league_size(), m_policy.chunk_size(),
+        [this, &buffer](const int league_rank_begin) {
+          const int league_rank_end =
+              (std::min)(league_rank_begin + m_policy.chunk_size(),
+                         m_policy.league_size());
+          execute_functor_range<WorkTag>(
+              m_functor, m_policy, league_rank_begin, league_rank_end,
+              buffer.get(Kokkos::Experimental::HPX::impl_hardware_thread_id()),
+              m_shared);
+        });
+#endif
+  }
+
+  ParallelFor(const FunctorType &arg_functor, const Policy &arg_policy)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_league(arg_policy.league_size()),
+        m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+                 FunctorTeamShmemSize<FunctorType>::value(
+                     arg_functor, arg_policy.team_size())) {}
+};
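+
+// Usage sketch (illustrative; hypothetical helper name): on this backend each
+// team has size one, so the specialization above maps one league rank at a
+// time onto a worker thread, handing it the per-thread scratch buffer sized
+// from scratch_size(0/1) plus the functor's team_shmem request.
+inline void hpx_team_for_sketch(const int league_size) {
+  using policy_type = Kokkos::TeamPolicy<Kokkos::Experimental::HPX>;
+  Kokkos::parallel_for(
+      "team_for", policy_type(league_size, 1),
+      [](const policy_type::member_type &member) {
+        (void)member.league_rank();  // per-team work goes here
+      });
+}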
+
+template <class FunctorType, class ReducerType, class... Properties>
+class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                     ReducerType, Kokkos::Experimental::HPX> {
+ private:
+  using Policy  = TeamPolicyInternal<Kokkos::Experimental::HPX, Properties...>;
+  using Member  = typename Policy::member_type;
+  using WorkTag = typename Policy::work_tag;
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+  using value_type     = typename Analysis::value_type;
+
+  const FunctorType m_functor;
+  const int m_league;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  pointer_type m_result_ptr;
+  const std::size_t m_shared;
+
+  bool m_force_synchronous;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value> execute_functor(
+      const FunctorType &functor, const Policy &policy, const int league_rank,
+      char *local_buffer, const std::size_t local_buffer_size,
+      reference_type update) {
+    functor(Member(policy, 0, league_rank, local_buffer, local_buffer_size),
+            update);
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value> execute_functor(
+      const FunctorType &functor, const Policy &policy, const int league_rank,
+      char *local_buffer, const std::size_t local_buffer_size,
+      reference_type update) {
+    const TagType t{};
+    functor(t, Member(policy, 0, league_rank, local_buffer, local_buffer_size),
+            update);
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value>
+  execute_functor_range(const FunctorType &functor, const Policy &policy,
+                        const int league_rank_begin, const int league_rank_end,
+                        char *local_buffer, const std::size_t local_buffer_size,
+                        reference_type update) {
+    for (int league_rank = league_rank_begin; league_rank < league_rank_end;
+         ++league_rank) {
+      functor(Member(policy, 0, league_rank, local_buffer, local_buffer_size),
+              update);
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value>
+  execute_functor_range(const FunctorType &functor, const Policy &policy,
+                        const int league_rank_begin, const int league_rank_end,
+                        char *local_buffer, const std::size_t local_buffer_size,
+                        reference_type update) {
+    const TagType t{};
+    for (int league_rank = league_rank_begin; league_rank < league_rank_end;
+         ++league_rank) {
+      functor(t,
+              Member(policy, 0, league_rank, local_buffer, local_buffer_size),
+              update);
+    }
+  }
+
+ public:
+  void execute() const {
+    if (m_policy.league_size() * m_policy.team_size() == 0) {
+      if (m_result_ptr) {
+        typename Analysis::Reducer final_reducer(
+            &ReducerConditional::select(m_functor, m_reducer));
+        final_reducer.init(m_result_ptr);
+        final_reducer.final(m_result_ptr);
+      }
+      return;
+    }
+    dispatch_execute_task(this, m_policy.space());
+  }
+
+  inline void execute_task() const {
+    // See [note 1] for an explanation.
+    Kokkos::Experimental::HPX::reset_on_exit_parallel reset_on_exit(
+        m_policy.space());
+
+    const int num_worker_threads = Kokkos::Experimental::HPX::concurrency();
+    const std::size_t value_size =
+        Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
+
+    thread_buffer &buffer = m_policy.space().impl_get_buffer();
+    buffer.resize(num_worker_threads, value_size + m_shared);
+
+    auto exec = Kokkos::Experimental::HPX::impl_get_executor();
+
+    using hpx::for_loop;
+    using hpx::execution::par;
+    using hpx::execution::static_chunk_size;
+
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+#if KOKKOS_HPX_IMPLEMENTATION == 0
+
+    for_loop(
+        par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
+        [&buffer, final_reducer](const std::size_t t) {
+          final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
+        });
+
+    for_loop(par.on(exec).with(static_chunk_size(m_policy.chunk_size())), 0,
+             m_policy.league_size(),
+             [this, &buffer, value_size](const int league_rank) {
+               std::size_t t =
+                   Kokkos::Experimental::HPX::impl_hardware_thread_id();
+               reference_type update = Analysis::Reducer::reference(
+                   reinterpret_cast<pointer_type>(buffer.get(t)));
+
+               execute_functor<WorkTag>(m_functor, m_policy, league_rank,
+                                        buffer.get(t) + value_size, m_shared,
+                                        update);
+             });
+
+#elif KOKKOS_HPX_IMPLEMENTATION == 1
+    using hpx::for_loop_strided;
+
+    for_loop(
+        par.on(exec).with(static_chunk_size(1)), 0, num_worker_threads,
+        [&buffer, final_reducer](std::size_t const t) {
+          final_reducer.init(reinterpret_cast<pointer_type>(buffer.get(t)));
+        });
+
+    for_loop_strided(
+        par.on(exec), 0, m_policy.league_size(), m_policy.chunk_size(),
+        [this, &buffer, value_size](int const league_rank_begin) {
+          std::size_t t = Kokkos::Experimental::HPX::impl_hardware_thread_id();
+          reference_type update = Analysis::Reducer::reference(
+              reinterpret_cast<pointer_type>(buffer.get(t)));
+          const int league_rank_end =
+              (std::min)(league_rank_begin + m_policy.chunk_size(),
+                         m_policy.league_size());
+          execute_functor_range<WorkTag>(
+              m_functor, m_policy, league_rank_begin, league_rank_end,
+              buffer.get(t) + value_size, m_shared, update);
+        });
+#endif
+
+    const pointer_type ptr = reinterpret_cast<pointer_type>(buffer.get(0));
+    for (int t = 1; t < num_worker_threads; ++t) {
+      final_reducer.join(ptr, reinterpret_cast<pointer_type>(buffer.get(t)));
+    }
+
+    final_reducer.final(ptr);
+
+    if (m_result_ptr) {
+      const int n = Analysis::value_count(
+          ReducerConditional::select(m_functor, m_reducer));
+
+      for (int j = 0; j < n; ++j) {
+        m_result_ptr[j] = ptr[j];
+      }
+    }
+  }
+
+  template <class ViewType>
+  ParallelReduce(const FunctorType &arg_functor, const Policy &arg_policy,
+                 const ViewType &arg_result,
+                 std::enable_if_t<Kokkos::is_view<ViewType>::value &&
+                                      !Kokkos::is_reducer<ReducerType>::value,
+                                  void *> = nullptr)
+      : m_functor(arg_functor),
+        m_league(arg_policy.league_size()),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result.data()),
+        m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+                 FunctorTeamShmemSize<FunctorType>::value(
+                     m_functor, arg_policy.team_size())),
+        m_force_synchronous(!arg_result.impl_track().has_record()) {}
+
+  inline ParallelReduce(const FunctorType &arg_functor, Policy arg_policy,
+                        const ReducerType &reducer)
+      : m_functor(arg_functor),
+        m_league(arg_policy.league_size()),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+                 FunctorTeamShmemSize<FunctorType>::value(
+                     arg_functor, arg_policy.team_size())),
+        m_force_synchronous(!reducer.view().impl_track().has_record()) {}
+};
+}  // namespace Impl
+}  // namespace Kokkos
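+
+// Usage sketch (illustrative; hypothetical helper name): the team
+// ParallelReduce above gives every worker thread its own initialized value
+// slot, runs the teams, then joins the per-thread partials serially into
+// slot 0 before final() and the copy-out to the result.
+inline double hpx_team_reduce_sketch(const double *a, const int rows,
+                                     const int cols) {
+  using policy_type = Kokkos::TeamPolicy<Kokkos::Experimental::HPX>;
+  double sum = 0.0;
+  Kokkos::parallel_reduce(
+      "team_reduce", policy_type(rows, 1),
+      [=](const policy_type::member_type &member, double &update) {
+        const int i = member.league_rank();
+        for (int j = 0; j < cols; ++j) update += a[i * cols + j];
+      },
+      sum);
+  return sum;
+}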
+
+namespace Kokkos {
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+    TeamThreadRange(const Impl::HPXTeamMember &thread, const iType &count) {
+  return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::HPXTeamMember>
+TeamThreadRange(const Impl::HPXTeamMember &thread, const iType1 &i_begin,
+                const iType2 &i_end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
+      thread, iType(i_begin), iType(i_end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+    TeamVectorRange(const Impl::HPXTeamMember &thread, const iType &count) {
+  return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::HPXTeamMember>
+TeamVectorRange(const Impl::HPXTeamMember &thread, const iType1 &i_begin,
+                const iType2 &i_end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
+      thread, iType(i_begin), iType(i_end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+    ThreadVectorRange(const Impl::HPXTeamMember &thread, const iType &count) {
+  return Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::HPXTeamMember>
+ThreadVectorRange(const Impl::HPXTeamMember &thread, const iType1 &i_begin,
+                  const iType2 &i_end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>(
+      thread, iType(i_begin), iType(i_end));
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::ThreadSingleStruct<Impl::HPXTeamMember> PerTeam(
+    const Impl::HPXTeamMember &thread) {
+  return Impl::ThreadSingleStruct<Impl::HPXTeamMember>(thread);
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::VectorSingleStruct<Impl::HPXTeamMember> PerThread(
+    const Impl::HPXTeamMember &thread) {
+  return Impl::VectorSingleStruct<Impl::HPXTeamMember>(thread);
+}
+
+/** \brief  Inter-thread parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+        &loop_boundaries,
+    const Lambda &lambda) {
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment)
+    lambda(i);
+}
+
+/** \brief  Inter-thread parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team
+ * and a summation of val is performed and put into result.
+ */
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+        &loop_boundaries,
+    const Lambda &lambda, ValueType &result) {
+  result = ValueType();
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, result);
+  }
+}
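+
+// Usage sketch (illustrative): the nested TeamThreadRange operations above are
+// called from inside a team kernel, where 'member', 'n' and the View 'x' come
+// from the enclosing parallel_for or parallel_reduce:
+//
+//   Kokkos::parallel_for(Kokkos::TeamThreadRange(member, n),
+//                        [=](const int i) { /* work on i */ });
+//   double row_sum = 0.0;
+//   Kokkos::parallel_reduce(Kokkos::TeamThreadRange(member, n),
+//                           [=](const int i, double &s) { s += x(i); },
+//                           row_sum);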
+
+/** \brief  Intra-thread vector parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling thread.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+        &loop_boundaries,
+    const Lambda &lambda) {
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i);
+  }
+}
+
+/** \brief  Intra-thread vector parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling thread
+ * and a summation of val is performed and put into result.
+ */
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+        &loop_boundaries,
+    const Lambda &lambda, ValueType &result) {
+  result = ValueType();
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, result);
+  }
+}
+
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+        &loop_boundaries,
+    const Lambda &lambda, const ReducerType &reducer) {
+  reducer.init(reducer.reference());
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, reducer.reference());
+  }
+}
+
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+        &loop_boundaries,
+    const Lambda &lambda, const ReducerType &reducer) {
+  reducer.init(reducer.reference());
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, reducer.reference());
+  }
+}
+
+template <typename iType, class FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    Impl::TeamThreadRangeBoundariesStruct<iType, Impl::HPXTeamMember> const
+        &loop_boundaries,
+    const FunctorType &lambda) {
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Kokkos::Impl::FunctorPatternInterface::SCAN, void,
+      FunctorType>::value_type;
+
+  value_type scan_val = value_type();
+
+  // Intra-member scan
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, scan_val, false);
+  }
+
+  // 'scan_val' output is the exclusive prefix sum
+  scan_val = loop_boundaries.thread.team_scan(scan_val);
+
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, scan_val, true);
+  }
+}
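+
+// Usage sketch (illustrative): since the team-level parallel_scan above calls
+// the functor once with final == false and once with final == true, a user
+// functor must add its contribution on both passes; 'member', 'n', 'x' and
+// 'y' here come from the enclosing team kernel:
+//
+//   Kokkos::parallel_scan(Kokkos::TeamThreadRange(member, n),
+//                         [=](const int i, int &partial, const bool final) {
+//                           if (final) y(i) = partial;  // exclusive result
+//                           partial += x(i);
+//                         });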
+
+/** \brief  Intra-thread vector parallel exclusive prefix sum. Executes
+ * lambda(iType i, ValueType & val, bool final) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes in the thread and a scan
+ * operation is performed. Depending on the target execution space the operator
+ * might be called twice: once with final=false and once with final=true. When
+ * final==true val contains the prefix sum value. The contribution of this "i"
+ * needs to be added to val no matter whether final==true or not. In a serial
+ * execution (i.e. team_size==1) the operator is only called once with
+ * final==true. Scan_val will be set to the final sum value over all vector
+ * lanes.
+ */
+template <typename iType, class FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+        &loop_boundaries,
+    const FunctorType &lambda) {
+  using value_type =
+      typename Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+                                     TeamPolicy<Experimental::HPX>,
+                                     FunctorType>::value_type;
+
+  value_type scan_val = value_type();
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, scan_val, true);
+  }
+}
+
+/** \brief  Intra-thread vector parallel scan with reducer
+ *
+ */
+template <typename iType, class FunctorType, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_scan(
+    const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::HPXTeamMember>
+        &loop_boundaries,
+    const FunctorType &lambda, const ReducerType &reducer) {
+  typename ReducerType::value_type scan_val;
+  reducer.init(scan_val);
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, scan_val, true);
+  }
+}
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<Impl::HPXTeamMember> &,
+    const FunctorType &lambda) {
+  lambda();
+}
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::HPXTeamMember> &,
+    const FunctorType &lambda) {
+  lambda();
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<Impl::HPXTeamMember> &,
+    const FunctorType &lambda, ValueType &val) {
+  lambda(val);
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::HPXTeamMember> &,
+    const FunctorType &lambda, ValueType &val) {
+  lambda(val);
+}
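+
+// Usage sketch (illustrative): with team size one on this backend, single()
+// just invokes the lambda; the ValueType overloads exist so code written for
+// wider teams still compiles. 'member' comes from the enclosing team kernel:
+//
+//   int token;
+//   Kokkos::single(Kokkos::PerTeam(member),
+//                  [&](int &v) { v = member.league_rank(); }, token);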
+
+}  // namespace Kokkos
+
+#include <HPX/Kokkos_HPX_Task.hpp>
+
+#endif /* #if defined( KOKKOS_ENABLE_HPX ) */
+#endif /* #ifndef KOKKOS_HPX_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Half.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Half.hpp
new file mode 100644 (file)
index 0000000..c108587
--- /dev/null
@@ -0,0 +1,1040 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HALF_HPP_
+#define KOKKOS_HALF_HPP_
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_HALF
+#endif
+
+#include <type_traits>
+#include <Kokkos_Macros.hpp>
+#include <iosfwd>  // istream & ostream for extraction and insertion ops
+#include <string>
+
+#ifdef KOKKOS_IMPL_HALF_TYPE_DEFINED
+
+// KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH: A macro to select which
+// floating_point_wrapper operator paths should be used. For CUDA, let the
+// compiler conditionally select when device ops are used. For SYCL, we have a
+// full half type on both host and device.
+#if defined(__CUDA_ARCH__) || defined(KOKKOS_ENABLE_SYCL)
+#define KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+#endif
+
+/************************* BEGIN forward declarations *************************/
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+template <class FloatType>
+class floating_point_wrapper;
+}
+
+// Declare half_t (binary16)
+using half_t = Kokkos::Experimental::Impl::floating_point_wrapper<
+    Kokkos::Impl::half_impl_t::type>;
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(float val);
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(bool val);
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(double val);
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(short val);
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(int val);
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long val);
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long long val);
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned short val);
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned int val);
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long val);
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long long val);
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(half_t);
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, float>::value, T>
+    cast_from_half(half_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, bool>::value, T>
+    cast_from_half(half_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, double>::value, T>
+    cast_from_half(half_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, short>::value, T>
+    cast_from_half(half_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, int>::value, T>
+    cast_from_half(half_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long>::value, T>
+    cast_from_half(half_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long long>::value, T>
+    cast_from_half(half_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned short>::value, T>
+        cast_from_half(half_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, unsigned int>::value, T>
+    cast_from_half(half_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long>::value, T>
+        cast_from_half(half_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long long>::value, T>
+        cast_from_half(half_t);
+
+// declare bhalf_t
+#ifdef KOKKOS_IMPL_BHALF_TYPE_DEFINED
+using bhalf_t = Kokkos::Experimental::Impl::floating_point_wrapper<
+    Kokkos::Impl::bhalf_impl_t::type>;
+
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(float val);
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(bool val);
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(double val);
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(short val);
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(int val);
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(long val);
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(long long val);
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned short val);
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned int val);
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned long val);
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned long long val);
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(bhalf_t val);
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, float>::value, T>
+    cast_from_bhalf(bhalf_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, bool>::value, T>
+    cast_from_bhalf(bhalf_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, double>::value, T>
+    cast_from_bhalf(bhalf_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, short>::value, T>
+    cast_from_bhalf(bhalf_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, int>::value, T>
+    cast_from_bhalf(bhalf_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long>::value, T>
+    cast_from_bhalf(bhalf_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long long>::value, T>
+    cast_from_bhalf(bhalf_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned short>::value, T>
+        cast_from_bhalf(bhalf_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, unsigned int>::value, T>
+    cast_from_bhalf(bhalf_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long>::value, T>
+        cast_from_bhalf(bhalf_t);
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long long>::value, T>
+        cast_from_bhalf(bhalf_t);
+#endif  // KOKKOS_IMPL_BHALF_TYPE_DEFINED
+
+template <class T>
+static KOKKOS_INLINE_FUNCTION Kokkos::Experimental::half_t cast_to_wrapper(
+    T x, const volatile Kokkos::Impl::half_impl_t::type&);
+
+#ifdef KOKKOS_IMPL_BHALF_TYPE_DEFINED
+template <class T>
+static KOKKOS_INLINE_FUNCTION Kokkos::Experimental::bhalf_t cast_to_wrapper(
+    T x, const volatile Kokkos::Impl::bhalf_impl_t::type&);
+#endif  // KOKKOS_IMPL_BHALF_TYPE_DEFINED
+
+template <class T>
+static KOKKOS_INLINE_FUNCTION T
+cast_from_wrapper(const Kokkos::Experimental::half_t& x);
+
+#ifdef KOKKOS_IMPL_BHALF_TYPE_DEFINED
+template <class T>
+static KOKKOS_INLINE_FUNCTION T
+cast_from_wrapper(const Kokkos::Experimental::bhalf_t& x);
+#endif  // KOKKOS_IMPL_BHALF_TYPE_DEFINED
+/************************** END forward declarations **************************/
+
+namespace Impl {
+template <class FloatType>
+class alignas(FloatType) floating_point_wrapper {
+ public:
+  using impl_type = FloatType;
+
+ private:
+  impl_type val;
+  using fixed_width_integer_type = std::conditional_t<
+      sizeof(impl_type) == 2, uint16_t,
+      std::conditional_t<
+          sizeof(impl_type) == 4, uint32_t,
+          std::conditional_t<sizeof(impl_type) == 8, uint64_t, void>>>;
+  static_assert(!std::is_void<fixed_width_integer_type>::value,
+                "Invalid impl_type");
+
+ public:
+  // In-class initialization and defaulted default constructors are not used,
+  // since Cuda supports half precision initialization via the constructor
+  // below.
+  KOKKOS_FUNCTION
+  floating_point_wrapper() : val(0.0F) {}
+
+// Copy constructors
+// Getting "C2580: multiple versions of a defaulted special
+// member function are not allowed" with VS 16.11.3 and CUDA 11.4.2
+#if defined(_WIN32) && defined(KOKKOS_ENABLE_CUDA)
+  KOKKOS_FUNCTION
+  floating_point_wrapper(const floating_point_wrapper& rhs) : val(rhs.val) {}
+#else
+  KOKKOS_DEFAULTED_FUNCTION
+  floating_point_wrapper(const floating_point_wrapper&) noexcept = default;
+#endif
+
+  KOKKOS_INLINE_FUNCTION
+  floating_point_wrapper(const volatile floating_point_wrapper& rhs) {
+#if defined(KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH) && !defined(KOKKOS_ENABLE_SYCL)
+    val = rhs.val;
+#else
+    const volatile fixed_width_integer_type* rv_ptr =
+        reinterpret_cast<const volatile fixed_width_integer_type*>(&rhs.val);
+    const fixed_width_integer_type rv_val = *rv_ptr;
+    val       = reinterpret_cast<const impl_type&>(rv_val);
+#endif  // KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+  }
+
+  // Don't support implicit conversion back to impl_type.
+  // impl_type is a storage only type on host.
+  KOKKOS_FUNCTION
+  explicit operator impl_type() const { return val; }
+  KOKKOS_FUNCTION
+  explicit operator float() const { return cast_from_wrapper<float>(*this); }
+  KOKKOS_FUNCTION
+  explicit operator bool() const { return cast_from_wrapper<bool>(*this); }
+  KOKKOS_FUNCTION
+  explicit operator double() const { return cast_from_wrapper<double>(*this); }
+  KOKKOS_FUNCTION
+  explicit operator short() const { return cast_from_wrapper<short>(*this); }
+  KOKKOS_FUNCTION
+  explicit operator int() const { return cast_from_wrapper<int>(*this); }
+  KOKKOS_FUNCTION
+  explicit operator long() const { return cast_from_wrapper<long>(*this); }
+  KOKKOS_FUNCTION
+  explicit operator long long() const {
+    return cast_from_wrapper<long long>(*this);
+  }
+  KOKKOS_FUNCTION
+  explicit operator unsigned short() const {
+    return cast_from_wrapper<unsigned short>(*this);
+  }
+  KOKKOS_FUNCTION
+  explicit operator unsigned int() const {
+    return cast_from_wrapper<unsigned int>(*this);
+  }
+  KOKKOS_FUNCTION
+  explicit operator unsigned long() const {
+    return cast_from_wrapper<unsigned long>(*this);
+  }
+  KOKKOS_FUNCTION
+  explicit operator unsigned long long() const {
+    return cast_from_wrapper<unsigned long long>(*this);
+  }
+
+  /**
+   * Conversion constructors.
+   *
+   * Support implicit conversions from impl_type, float, double ->
+   * floating_point_wrapper. Mixed precision expressions require upcasting which
+   * is done in the
+   * "// Binary Arithmetic" operator overloads below.
+   *
+   * Support implicit conversions from integral types -> floating_point_wrapper.
+   * Expressions involving floating_point_wrapper with integral types require
+   * downcasting the integral types to floating_point_wrapper. Existing operator
+   * overloads can handle this with the addition of the below implicit
+   * conversion constructors.
+   */
+  KOKKOS_FUNCTION
+  constexpr floating_point_wrapper(impl_type rhs) : val(rhs) {}
+  KOKKOS_FUNCTION
+  floating_point_wrapper(float rhs) : val(cast_to_wrapper(rhs, val).val) {}
+  KOKKOS_FUNCTION
+  floating_point_wrapper(double rhs) : val(cast_to_wrapper(rhs, val).val) {}
+  KOKKOS_FUNCTION
+  explicit floating_point_wrapper(bool rhs)
+      : val(cast_to_wrapper(rhs, val).val) {}
+  KOKKOS_FUNCTION
+  floating_point_wrapper(short rhs) : val(cast_to_wrapper(rhs, val).val) {}
+  KOKKOS_FUNCTION
+  floating_point_wrapper(int rhs) : val(cast_to_wrapper(rhs, val).val) {}
+  KOKKOS_FUNCTION
+  floating_point_wrapper(long rhs) : val(cast_to_wrapper(rhs, val).val) {}
+  KOKKOS_FUNCTION
+  floating_point_wrapper(long long rhs) : val(cast_to_wrapper(rhs, val).val) {}
+  KOKKOS_FUNCTION
+  floating_point_wrapper(unsigned short rhs)
+      : val(cast_to_wrapper(rhs, val).val) {}
+  KOKKOS_FUNCTION
+  floating_point_wrapper(unsigned int rhs)
+      : val(cast_to_wrapper(rhs, val).val) {}
+  KOKKOS_FUNCTION
+  floating_point_wrapper(unsigned long rhs)
+      : val(cast_to_wrapper(rhs, val).val) {}
+  KOKKOS_FUNCTION
+  floating_point_wrapper(unsigned long long rhs)
+      : val(cast_to_wrapper(rhs, val).val) {}
+
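+  // Usage sketch (illustrative; 'h' is a hypothetical value): the
+  // constructors above make mixed expressions work without explicit casts:
+  //
+  //   Kokkos::Experimental::half_t h = 3;  // int -> half_t (implicit)
+  //   double d = 1.5 + h;                  // upcasts to double
+  //   int i = static_cast<int>(h);         // conversion back stays explicit
+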
+  // Unary operators
+  KOKKOS_FUNCTION
+  floating_point_wrapper operator+() const {
+    floating_point_wrapper tmp = *this;
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    tmp.val = +tmp.val;
+#else
+    tmp.val   = cast_to_wrapper(+cast_from_wrapper<float>(tmp), val).val;
+#endif
+    return tmp;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper operator-() const {
+    floating_point_wrapper tmp = *this;
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    tmp.val = -tmp.val;
+#else
+    tmp.val   = cast_to_wrapper(-cast_from_wrapper<float>(tmp), val).val;
+#endif
+    return tmp;
+  }
+
+  // Prefix operators
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator++() {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    val = val + impl_type(1.0F);  // CUDA has no operator++ for __nv_bfloat16
+#else
+    float tmp = cast_from_wrapper<float>(*this);
+    ++tmp;
+    val       = cast_to_wrapper(tmp, val).val;
+#endif
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator--() {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    val = val - impl_type(1.0F);  // CUDA has no operator-- for __nv_bfloat16
+#else
+    float tmp = cast_from_wrapper<float>(*this);
+    --tmp;
+    val = cast_to_wrapper(tmp, val).val;
+#endif
+    return *this;
+  }
+
+  // Postfix operators
+  KOKKOS_FUNCTION
+  floating_point_wrapper operator++(int) {
+    floating_point_wrapper tmp = *this;
+    operator++();
+    return tmp;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper operator--(int) {
+    floating_point_wrapper tmp = *this;
+    operator--();
+    return tmp;
+  }
+
+  // Binary operators
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator=(impl_type rhs) {
+    val = rhs;
+    return *this;
+  }
+
+  template <class T>
+  KOKKOS_FUNCTION floating_point_wrapper& operator=(T rhs) {
+    val = cast_to_wrapper(rhs, val).val;
+    return *this;
+  }
+
+  template <class T>
+  KOKKOS_FUNCTION void operator=(T rhs) volatile {
+    impl_type new_val = cast_to_wrapper(rhs, val).val;
+    volatile fixed_width_integer_type* val_ptr =
+        reinterpret_cast<volatile fixed_width_integer_type*>(
+            const_cast<impl_type*>(&val));
+    *val_ptr = reinterpret_cast<fixed_width_integer_type&>(new_val);
+  }
+
+  // Compound operators
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator+=(floating_point_wrapper rhs) {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    val = val + rhs.val;  // CUDA has no operator+= for __nv_bfloat16
+#else
+    val = cast_to_wrapper(
+              cast_from_wrapper<float>(*this) + cast_from_wrapper<float>(rhs),
+              val)
+              .val;
+#endif
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  void operator+=(const volatile floating_point_wrapper& rhs) volatile {
+    floating_point_wrapper tmp_rhs = rhs;
+    floating_point_wrapper tmp_lhs = *this;
+
+    tmp_lhs += tmp_rhs;
+    *this = tmp_lhs;
+  }
+
+  // Compound operators: upcast overloads for +=
+  template <class T>
+  KOKKOS_FUNCTION friend std::enable_if_t<
+      std::is_same<T, float>::value || std::is_same<T, double>::value, T>
+  operator+=(T& lhs, floating_point_wrapper rhs) {
+    lhs += static_cast<T>(rhs);
+    return lhs;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator+=(float rhs) {
+    float result = static_cast<float>(val) + rhs;
+    val          = static_cast<impl_type>(result);
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator+=(double rhs) {
+    double result = static_cast<double>(val) + rhs;
+    val           = static_cast<impl_type>(result);
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator-=(floating_point_wrapper rhs) {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    val = val - rhs.val;  // CUDA has no operator-= for __nv_bfloat16
+#else
+    val = cast_to_wrapper(
+              cast_from_wrapper<float>(*this) - cast_from_wrapper<float>(rhs),
+              val)
+              .val;
+#endif
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  void operator-=(const volatile floating_point_wrapper& rhs) volatile {
+    floating_point_wrapper tmp_rhs = rhs;
+    floating_point_wrapper tmp_lhs = *this;
+
+    tmp_lhs -= tmp_rhs;
+    *this = tmp_lhs;
+  }
+
+  // Compound operators: upcast overloads for -=
+  template <class T>
+  KOKKOS_FUNCTION friend std::enable_if_t<
+      std::is_same<T, float>::value || std::is_same<T, double>::value, T>
+  operator-=(T& lhs, floating_point_wrapper rhs) {
+    lhs -= static_cast<T>(rhs);
+    return lhs;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator-=(float rhs) {
+    float result = static_cast<float>(val) - rhs;
+    val          = static_cast<impl_type>(result);
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator-=(double rhs) {
+    double result = static_cast<double>(val) - rhs;
+    val           = static_cast<impl_type>(result);
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator*=(floating_point_wrapper rhs) {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    val = val * rhs.val;  // CUDA has no operator*= for __nv_bfloat16
+#else
+    val = cast_to_wrapper(
+              cast_from_wrapper<float>(*this) * cast_from_wrapper<float>(rhs),
+              val)
+              .val;
+#endif
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  void operator*=(const volatile floating_point_wrapper& rhs) volatile {
+    floating_point_wrapper tmp_rhs = rhs;
+    floating_point_wrapper tmp_lhs = *this;
+
+    tmp_lhs *= tmp_rhs;
+    *this = tmp_lhs;
+  }
+
+  // Compound operators: upcast overloads for *=
+  template <class T>
+  KOKKOS_FUNCTION friend std::enable_if_t<
+      std::is_same<T, float>::value || std::is_same<T, double>::value, T>
+  operator*=(T& lhs, floating_point_wrapper rhs) {
+    lhs *= static_cast<T>(rhs);
+    return lhs;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator*=(float rhs) {
+    float result = static_cast<float>(val) * rhs;
+    val          = static_cast<impl_type>(result);
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator*=(double rhs) {
+    double result = static_cast<double>(val) * rhs;
+    val           = static_cast<impl_type>(result);
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator/=(floating_point_wrapper rhs) {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    val = val / rhs.val;  // CUDA has no operator/= for __nv_bfloat16
+#else
+    val = cast_to_wrapper(
+              cast_from_wrapper<float>(*this) / cast_from_wrapper<float>(rhs),
+              val)
+              .val;
+#endif
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  void operator/=(const volatile floating_point_wrapper& rhs) volatile {
+    floating_point_wrapper tmp_rhs = rhs;
+    floating_point_wrapper tmp_lhs = *this;
+
+    tmp_lhs /= tmp_rhs;
+    *this = tmp_lhs;
+  }
+
+  // Compound operators: upcast overloads for /=
+  template <class T>
+  KOKKOS_FUNCTION friend std::enable_if_t<
+      std::is_same<T, float>::value || std::is_same<T, double>::value, T>
+  operator/=(T& lhs, floating_point_wrapper rhs) {
+    lhs /= static_cast<T>(rhs);
+    return lhs;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator/=(float rhs) {
+    float result = static_cast<float>(val) / rhs;
+    val          = static_cast<impl_type>(result);
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  floating_point_wrapper& operator/=(double rhs) {
+    double result = static_cast<double>(val) / rhs;
+    val           = static_cast<impl_type>(result);
+    return *this;
+  }
+
+  // Binary Arithmetic
+  KOKKOS_FUNCTION
+  friend floating_point_wrapper operator+(floating_point_wrapper lhs,
+                                          floating_point_wrapper rhs) {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    lhs += rhs;
+#else
+    lhs.val = cast_to_wrapper(
+                  cast_from_wrapper<float>(lhs) + cast_from_wrapper<float>(rhs),
+                  lhs.val)
+                  .val;
+#endif
+    return lhs;
+  }
+
+  // Binary Arithmetic upcast operators for +
+  template <class T>
+  KOKKOS_FUNCTION friend std::enable_if_t<
+      std::is_same<T, float>::value || std::is_same<T, double>::value, T>
+  operator+(floating_point_wrapper lhs, T rhs) {
+    return T(lhs) + rhs;
+  }
+
+  template <class T>
+  KOKKOS_FUNCTION friend std::enable_if_t<
+      std::is_same<T, float>::value || std::is_same<T, double>::value, T>
+  operator+(T lhs, floating_point_wrapper rhs) {
+    return lhs + T(rhs);
+  }
+
+  KOKKOS_FUNCTION
+  friend floating_point_wrapper operator-(floating_point_wrapper lhs,
+                                          floating_point_wrapper rhs) {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    lhs -= rhs;
+#else
+    lhs.val = cast_to_wrapper(
+                  cast_from_wrapper<float>(lhs) - cast_from_wrapper<float>(rhs),
+                  lhs.val)
+                  .val;
+#endif
+    return lhs;
+  }
+
+  // Binary Arithmetic upcast operators for -
+  template <class T>
+  KOKKOS_FUNCTION friend std::enable_if_t<
+      std::is_same<T, float>::value || std::is_same<T, double>::value, T>
+  operator-(floating_point_wrapper lhs, T rhs) {
+    return T(lhs) - rhs;
+  }
+
+  template <class T>
+  KOKKOS_FUNCTION friend std::enable_if_t<
+      std::is_same<T, float>::value || std::is_same<T, double>::value, T>
+  operator-(T lhs, floating_point_wrapper rhs) {
+    return lhs - T(rhs);
+  }
+
+  KOKKOS_FUNCTION
+  friend floating_point_wrapper operator*(floating_point_wrapper lhs,
+                                          floating_point_wrapper rhs) {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    lhs *= rhs;
+#else
+    lhs.val = cast_to_wrapper(
+                  cast_from_wrapper<float>(lhs) * cast_from_wrapper<float>(rhs),
+                  lhs.val)
+                  .val;
+#endif
+    return lhs;
+  }
+
+  // Binary Arithmetic upcast operators for *
+  template <class T>
+  KOKKOS_FUNCTION friend std::enable_if_t<
+      std::is_same<T, float>::value || std::is_same<T, double>::value, T>
+  operator*(floating_point_wrapper lhs, T rhs) {
+    return T(lhs) * rhs;
+  }
+
+  template <class T>
+  KOKKOS_FUNCTION friend std::enable_if_t<
+      std::is_same<T, float>::value || std::is_same<T, double>::value, T>
+  operator*(T lhs, floating_point_wrapper rhs) {
+    return lhs * T(rhs);
+  }
+
+  KOKKOS_FUNCTION
+  friend floating_point_wrapper operator/(floating_point_wrapper lhs,
+                                          floating_point_wrapper rhs) {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    lhs /= rhs;
+#else
+    lhs.val = cast_to_wrapper(
+                  cast_from_wrapper<float>(lhs) / cast_from_wrapper<float>(rhs),
+                  lhs.val)
+                  .val;
+#endif
+    return lhs;
+  }
+
+  // Binary Arithmetic upcast operators for /
+  template <class T>
+  KOKKOS_FUNCTION friend std::enable_if_t<
+      std::is_same<T, float>::value || std::is_same<T, double>::value, T>
+  operator/(floating_point_wrapper lhs, T rhs) {
+    return T(lhs) / rhs;
+  }
+
+  template <class T>
+  KOKKOS_FUNCTION friend std::enable_if_t<
+      std::is_same<T, float>::value || std::is_same<T, double>::value, T>
+  operator/(T lhs, floating_point_wrapper rhs) {
+    return lhs / T(rhs);
+  }
+
+  // Logical operators
+  KOKKOS_FUNCTION
+  bool operator!() const {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    return static_cast<bool>(!val);
+#else
+    return !cast_from_wrapper<float>(*this);
+#endif
+  }
+
+  // NOTE: Loses short-circuit evaluation
+  KOKKOS_FUNCTION
+  bool operator&&(floating_point_wrapper rhs) const {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    return static_cast<bool>(val && rhs.val);
+#else
+    return cast_from_wrapper<float>(*this) && cast_from_wrapper<float>(rhs);
+#endif
+  }
+
+  // NOTE: Loses short-circuit evaluation
+  KOKKOS_FUNCTION
+  bool operator||(floating_point_wrapper rhs) const {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    return static_cast<bool>(val || rhs.val);
+#else
+    return cast_from_wrapper<float>(*this) || cast_from_wrapper<float>(rhs);
+#endif
+  }
+
+  // Comparison operators
+  KOKKOS_FUNCTION
+  bool operator==(floating_point_wrapper rhs) const {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    return static_cast<bool>(val == rhs.val);
+#else
+    return cast_from_wrapper<float>(*this) == cast_from_wrapper<float>(rhs);
+#endif
+  }
+
+  KOKKOS_FUNCTION
+  bool operator!=(floating_point_wrapper rhs) const {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    return static_cast<bool>(val != rhs.val);
+#else
+    return cast_from_wrapper<float>(*this) != cast_from_wrapper<float>(rhs);
+#endif
+  }
+
+  KOKKOS_FUNCTION
+  bool operator<(floating_point_wrapper rhs) const {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    return static_cast<bool>(val < rhs.val);
+#else
+    return cast_from_wrapper<float>(*this) < cast_from_wrapper<float>(rhs);
+#endif
+  }
+
+  KOKKOS_FUNCTION
+  bool operator>(floating_point_wrapper rhs) const {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    return static_cast<bool>(val > rhs.val);
+#else
+    return cast_from_wrapper<float>(*this) > cast_from_wrapper<float>(rhs);
+#endif
+  }
+
+  KOKKOS_FUNCTION
+  bool operator<=(floating_point_wrapper rhs) const {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    return static_cast<bool>(val <= rhs.val);
+#else
+    return cast_from_wrapper<float>(*this) <= cast_from_wrapper<float>(rhs);
+#endif
+  }
+
+  KOKKOS_FUNCTION
+  bool operator>=(floating_point_wrapper rhs) const {
+#ifdef KOKKOS_HALF_IS_FULL_TYPE_ON_ARCH
+    return static_cast<bool>(val >= rhs.val);
+#else
+    return cast_from_wrapper<float>(*this) >= cast_from_wrapper<float>(rhs);
+#endif
+  }
+
+  KOKKOS_FUNCTION
+  friend bool operator==(const volatile floating_point_wrapper& lhs,
+                         const volatile floating_point_wrapper& rhs) {
+    floating_point_wrapper tmp_lhs = lhs, tmp_rhs = rhs;
+    return tmp_lhs == tmp_rhs;
+  }
+
+  KOKKOS_FUNCTION
+  friend bool operator!=(const volatile floating_point_wrapper& lhs,
+                         const volatile floating_point_wrapper& rhs) {
+    floating_point_wrapper tmp_lhs = lhs, tmp_rhs = rhs;
+    return tmp_lhs != tmp_rhs;
+  }
+
+  KOKKOS_FUNCTION
+  friend bool operator<(const volatile floating_point_wrapper& lhs,
+                        const volatile floating_point_wrapper& rhs) {
+    floating_point_wrapper tmp_lhs = lhs, tmp_rhs = rhs;
+    return tmp_lhs < tmp_rhs;
+  }
+
+  KOKKOS_FUNCTION
+  friend bool operator>(const volatile floating_point_wrapper& lhs,
+                        const volatile floating_point_wrapper& rhs) {
+    floating_point_wrapper tmp_lhs = lhs, tmp_rhs = rhs;
+    return tmp_lhs > tmp_rhs;
+  }
+
+  KOKKOS_FUNCTION
+  friend bool operator<=(const volatile floating_point_wrapper& lhs,
+                         const volatile floating_point_wrapper& rhs) {
+    floating_point_wrapper tmp_lhs = lhs, tmp_rhs = rhs;
+    return tmp_lhs <= tmp_rhs;
+  }
+
+  KOKKOS_FUNCTION
+  friend bool operator>=(const volatile floating_point_wrapper& lhs,
+                         const volatile floating_point_wrapper& rhs) {
+    floating_point_wrapper tmp_lhs = lhs, tmp_rhs = rhs;
+    return tmp_lhs >= tmp_rhs;
+  }
+
+  // Insertion and extraction operators
+  friend std::ostream& operator<<(std::ostream& os,
+                                  const floating_point_wrapper& x) {
+    const std::string out = std::to_string(static_cast<double>(x));
+    os << out;
+    return os;
+  }
+
+  friend std::istream& operator>>(std::istream& is, floating_point_wrapper& x) {
+    std::string in;
+    is >> in;
+    x = std::stod(in);
+    return is;
+  }
+};
+}  // namespace Impl
+
+// Define wrapper overloads now that floating_point_wrapper has been declared
+template <class T>
+static KOKKOS_INLINE_FUNCTION Kokkos::Experimental::half_t cast_to_wrapper(
+    T x, const volatile Kokkos::Impl::half_impl_t::type&) {
+  return Kokkos::Experimental::cast_to_half(x);
+}
+
+#ifdef KOKKOS_IMPL_BHALF_TYPE_DEFINED
+template <class T>
+static KOKKOS_INLINE_FUNCTION Kokkos::Experimental::bhalf_t cast_to_wrapper(
+    T x, const volatile Kokkos::Impl::bhalf_impl_t::type&) {
+  return Kokkos::Experimental::cast_to_bhalf(x);
+}
+#endif  // KOKKOS_IMPL_BHALF_TYPE_DEFINED
+
+template <class T>
+static KOKKOS_INLINE_FUNCTION T
+cast_from_wrapper(const Kokkos::Experimental::half_t& x) {
+  return Kokkos::Experimental::cast_from_half<T>(x);
+}
+
+#ifdef KOKKOS_IMPL_BHALF_TYPE_DEFINED
+template <class T>
+static KOKKOS_INLINE_FUNCTION T
+cast_from_wrapper(const Kokkos::Experimental::bhalf_t& x) {
+  return Kokkos::Experimental::cast_from_bhalf<T>(x);
+}
+#endif  // KOKKOS_IMPL_BHALF_TYPE_DEFINED
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif  // KOKKOS_IMPL_HALF_TYPE_DEFINED
+
+// If none of the above actually defined a half-precision type, fall back to
+// a float-based implementation here
+#ifndef KOKKOS_IMPL_HALF_TYPE_DEFINED
+#define KOKKOS_IMPL_HALF_TYPE_DEFINED
+#define KOKKOS_HALF_T_IS_FLOAT true
+namespace Kokkos {
+namespace Impl {
+struct half_impl_t {
+  using type = float;
+};
+}  // namespace Impl
+namespace Experimental {
+
+using half_t = Kokkos::Impl::half_impl_t::type;
+
+// cast_to_half
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(float val) { return half_t(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(bool val) { return half_t(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(double val) { return half_t(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(short val) { return half_t(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned short val) { return half_t(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(int val) { return half_t(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned int val) { return half_t(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long val) { return half_t(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long val) { return half_t(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long long val) { return half_t(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long long val) { return half_t(val); }
+
+// cast_from_half
+// Use an explicit list here as well, since the cast_to_half overloads above
+// are explicit and, for example, do not include char
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<
+    std::is_same<T, float>::value || std::is_same<T, bool>::value ||
+        std::is_same<T, double>::value || std::is_same<T, short>::value ||
+        std::is_same<T, unsigned short>::value || std::is_same<T, int>::value ||
+        std::is_same<T, unsigned int>::value || std::is_same<T, long>::value ||
+        std::is_same<T, unsigned long>::value ||
+        std::is_same<T, long long>::value ||
+        std::is_same<T, unsigned long long>::value,
+    T>
+cast_from_half(half_t val) {
+  return T(val);
+}
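+
+// Illustrative usage sketch (not part of upstream Kokkos): with this
+// fallback, half_t is plain float, so a cast round trip is lossless:
+//
+//   Kokkos::Experimental::half_t h = Kokkos::Experimental::cast_to_half(3.5);
+//   double d = Kokkos::Experimental::cast_from_half<double>(h);  // d == 3.5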
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#else
+#define KOKKOS_HALF_T_IS_FLOAT false
+#endif  // KOKKOS_IMPL_HALF_TYPE_DEFINED
+
+#ifndef KOKKOS_IMPL_BHALF_TYPE_DEFINED
+#define KOKKOS_IMPL_BHALF_TYPE_DEFINED
+#define KOKKOS_BHALF_T_IS_FLOAT true
+namespace Kokkos {
+namespace Impl {
+struct bhalf_impl_t {
+  using type = float;
+};
+}  // namespace Impl
+
+namespace Experimental {
+
+using bhalf_t = Kokkos::Impl::bhalf_impl_t::type;
+
+// cast_to_bhalf
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(float val) { return bhalf_t(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(bool val) { return bhalf_t(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(double val) { return bhalf_t(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(short val) { return bhalf_t(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned short val) { return bhalf_t(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(int val) { return bhalf_t(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned int val) { return bhalf_t(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(long val) { return bhalf_t(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned long val) { return bhalf_t(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(long long val) { return bhalf_t(val); }
+KOKKOS_INLINE_FUNCTION
+bhalf_t cast_to_bhalf(unsigned long long val) { return bhalf_t(val); }
+
+// cast_from_bhalf
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<
+    std::is_same<T, float>::value || std::is_same<T, bool>::value ||
+        std::is_same<T, double>::value || std::is_same<T, short>::value ||
+        std::is_same<T, unsigned short>::value || std::is_same<T, int>::value ||
+        std::is_same<T, unsigned int>::value || std::is_same<T, long>::value ||
+        std::is_same<T, unsigned long>::value ||
+        std::is_same<T, long long>::value ||
+        std::is_same<T, unsigned long long>::value,
+    T>
+cast_from_bhalf(bhalf_t val) {
+  return T(val);
+}
+}  // namespace Experimental
+}  // namespace Kokkos
+#else
+#define KOKKOS_BHALF_T_IS_FLOAT false
+#endif  // KOKKOS_IMPL_BHALF_TYPE_DEFINED
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_HALF
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_HALF
+#endif
+#endif  // KOKKOS_HALF_HPP_
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_HostSpace.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_HostSpace.hpp
new file mode 100644 (file)
index 0000000..1d67e2f
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_HOSTSPACE_HPP
+#define KOKKOS_HOSTSPACE_HPP
+
+#include <cstring>
+#include <string>
+#include <iosfwd>
+#include <typeinfo>
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Concepts.hpp>
+#include <Kokkos_MemoryTraits.hpp>
+
+#include <impl/Kokkos_Traits.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+#include <impl/Kokkos_Tools.hpp>
+
+#include "impl/Kokkos_HostSpace_deepcopy.hpp"
+#include <impl/Kokkos_MemorySpace.hpp>
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+namespace Impl {
+
+/// \brief Initialize lock array for arbitrary size atomics.
+///
+/// Arbitrary atomics are implemented using a hash table of locks
+/// where the hash value is derived from the address of the
+/// object for which an atomic operation is performed.
+/// This function initializes the locks to zero (unset).
+void init_lock_array_host_space();
+
+/// \brief Acquire a lock for the address
+///
+/// This function tries to acquire the lock for the hash value derived
+/// from the provided ptr. If the lock is successfully acquired the
+/// function returns true. Otherwise it returns false.
+bool lock_address_host_space(void* ptr);
+
+/// \brief Release lock for the address
+///
+/// This function releases the lock for the hash value derived
+/// from the provided ptr. This function should only be called
+/// after previously successfully acquiring a lock with
+/// lock_address.
+void unlock_address_host_space(void* ptr);
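+
+// Illustrative usage sketch (an assumption, not upstream code): a caller
+// implementing an arbitrary-size atomic would spin until the address lock
+// is acquired, perform the read-modify-write, then release the lock:
+//
+//   while (!Kokkos::Impl::lock_address_host_space(ptr)) { /* spin */ }
+//   /* ... read-modify-write the object at ptr ... */
+//   Kokkos::Impl::unlock_address_host_space(ptr);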
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+/// \class HostSpace
+/// \brief Memory management for host memory.
+///
+/// HostSpace is a memory space that governs host memory.  "Host"
+/// memory means the usual CPU-accessible memory.
+class HostSpace {
+ public:
+  //! Tag this class as a kokkos memory space
+  using memory_space = HostSpace;
+  using size_type    = size_t;
+
+  /// \typedef execution_space
+  /// \brief Default execution space for this memory space.
+  ///
+  /// Every memory space has a default execution space.  This is
+  /// useful for things like initializing a View (which happens in
+  /// parallel using the View's default execution space).
+  using execution_space = DefaultHostExecutionSpace;
+
+  //! This memory space preferred device_type
+  using device_type = Kokkos::Device<execution_space, memory_space>;
+
+  /**\brief  Default memory space instance */
+  HostSpace();
+  HostSpace(HostSpace&& rhs)      = default;
+  HostSpace(const HostSpace& rhs) = default;
+  HostSpace& operator=(HostSpace&&) = default;
+  HostSpace& operator=(const HostSpace&) = default;
+  ~HostSpace()                           = default;
+
+  /**\brief  Non-default memory space instance to choose allocation mechanism,
+   * if available */
+
+  enum AllocationMechanism {
+    STD_MALLOC,
+    POSIX_MEMALIGN,
+    POSIX_MMAP,
+    INTEL_MM_ALLOC
+  };
+
+  explicit HostSpace(const AllocationMechanism&);
+
+  /**\brief  Allocate untracked memory in the space */
+  void* allocate(const size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  /**\brief  Deallocate untracked memory in the space */
+  void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
+
+ private:
+  template <class, class, class, class>
+  friend class Kokkos::Experimental::LogicalMemorySpace;
+
+  void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      const Kokkos::Tools::SpaceHandle =
+                          Kokkos::Tools::make_space_handle(name())) const;
+  void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                       const size_t arg_alloc_size,
+                       const size_t arg_logical_size = 0,
+                       const Kokkos::Tools::SpaceHandle =
+                           Kokkos::Tools::make_space_handle(name())) const;
+
+ public:
+  /**\brief Return Name of the MemorySpace */
+  static constexpr const char* name() { return m_name; }
+
+ private:
+  AllocationMechanism m_alloc_mech;
+  static constexpr const char* m_name = "Host";
+  friend class Kokkos::Impl::SharedAllocationRecord<Kokkos::HostSpace, void>;
+};
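+
+// Illustrative usage sketch (not part of upstream Kokkos): untracked
+// allocation through a HostSpace instance; the caller owns the pointer and
+// must deallocate it with the same size:
+//
+//   Kokkos::HostSpace space;
+//   void* p = space.allocate("my_buffer", 1024);
+//   /* ... use p ... */
+//   space.deallocate("my_buffer", p, 1024);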
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+static_assert(Kokkos::Impl::MemorySpaceAccess<Kokkos::HostSpace,
+                                              Kokkos::HostSpace>::assignable,
+              "");
+
+template <typename S>
+struct HostMirror {
+ private:
+  // If input execution space can access HostSpace then keep it.
+  // Example: Kokkos::OpenMP can access, Kokkos::Cuda cannot
+  enum {
+    keep_exe = Kokkos::Impl::MemorySpaceAccess<
+        typename S::execution_space::memory_space,
+        Kokkos::HostSpace>::accessible
+  };
+
+  // If HostSpace can access memory space then keep it.
+  // Example:  Cannot access Kokkos::CudaSpace, can access Kokkos::CudaUVMSpace
+  enum {
+    keep_mem =
+        Kokkos::Impl::MemorySpaceAccess<Kokkos::HostSpace,
+                                        typename S::memory_space>::accessible
+  };
+
+ public:
+  using Space = std::conditional_t<
+      keep_exe && keep_mem, S,
+      std::conditional_t<keep_mem,
+                         Kokkos::Device<Kokkos::HostSpace::execution_space,
+                                        typename S::memory_space>,
+                         Kokkos::HostSpace>>;
+};
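+
+// Illustrative examples (sketch, assuming the respective backends are
+// enabled): HostMirror<Kokkos::OpenMP>::Space keeps Kokkos::OpenMP, since
+// both its execution and memory space are host-accessible, while
+// HostMirror<Kokkos::Cuda>::Space falls back to Kokkos::HostSpace.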
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <>
+class SharedAllocationRecord<Kokkos::HostSpace, void>
+    : public SharedAllocationRecordCommon<Kokkos::HostSpace> {
+ private:
+  friend Kokkos::HostSpace;
+  friend class SharedAllocationRecordCommon<Kokkos::HostSpace>;
+
+  using base_t     = SharedAllocationRecordCommon<Kokkos::HostSpace>;
+  using RecordBase = SharedAllocationRecord<void, void>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+#ifdef KOKKOS_ENABLE_DEBUG
+  /**\brief  Root record for tracked allocations from this HostSpace instance */
+  static RecordBase s_root_record;
+#endif
+
+  const Kokkos::HostSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord()
+#if defined( \
+    KOKKOS_IMPL_INTEL_WORKAROUND_NOEXCEPT_SPECIFICATION_VIRTUAL_FUNCTION)
+      noexcept
+#endif
+      ;
+  SharedAllocationRecord() = default;
+
+  // This constructor does not forward to the one without the exec_space
+  // argument, to work around https://github.com/kokkos/kokkos/issues/5258.
+  // Because this constructor is templated, it cannot be moved into the cpp
+  // file like the other constructor.
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /* exec_space*/, const Kokkos::HostSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &deallocate)
+      : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+            &SharedAllocationRecord<Kokkos::HostSpace, void>::s_root_record,
+#endif
+            Impl::checked_allocation_with_header(arg_space, arg_label,
+                                                 arg_alloc_size),
+            sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+            arg_label),
+        m_space(arg_space) {
+    this->base_t::_fill_host_accessible_header_info(*RecordBase::m_alloc_ptr,
+                                                    arg_label);
+  }
+
+  SharedAllocationRecord(
+      const Kokkos::HostSpace& arg_space, const std::string& arg_label,
+      const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &deallocate);
+
+ public:
+  KOKKOS_INLINE_FUNCTION static SharedAllocationRecord* allocate(
+      const Kokkos::HostSpace& arg_space, const std::string& arg_label,
+      const size_t arg_alloc_size) {
+    KOKKOS_IF_ON_HOST((return new SharedAllocationRecord(arg_space, arg_label,
+                                                         arg_alloc_size);))
+    KOKKOS_IF_ON_DEVICE(((void)arg_space; (void)arg_label; (void)arg_alloc_size;
+                         return nullptr;))
+  }
+};
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <class DT, class... DP>
+struct ZeroMemset<typename HostSpace::execution_space, DT, DP...> {
+  ZeroMemset(const typename HostSpace::execution_space& exec,
+             const View<DT, DP...>& dst,
+             typename View<DT, DP...>::const_value_type&) {
+    // Host spaces, except for HPX, are synchronous and we need to fence for HPX
+    // since we can't properly enqueue a std::memset otherwise.
+    // We can't use exec.fence() directly since we don't have a full definition
+    // of HostSpace here.
+    hostspace_fence(exec);
+    using ValueType = typename View<DT, DP...>::value_type;
+    std::memset(dst.data(), 0, sizeof(ValueType) * dst.size());
+  }
+
+  ZeroMemset(const View<DT, DP...>& dst,
+             typename View<DT, DP...>::const_value_type&) {
+    using ValueType = typename View<DT, DP...>::value_type;
+    std::memset(dst.data(), 0, sizeof(ValueType) * dst.size());
+  }
+};
+
+template <>
+struct DeepCopy<HostSpace, HostSpace, DefaultHostExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    hostspace_parallel_deepcopy(dst, src, n);
+  }
+
+  DeepCopy(const DefaultHostExecutionSpace& exec, void* dst, const void* src,
+           size_t n) {
+    hostspace_parallel_deepcopy_async(exec, dst, src, n);
+  }
+};
+
+template <class ExecutionSpace>
+struct DeepCopy<HostSpace, HostSpace, ExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    hostspace_parallel_deepcopy(dst, src, n);
+  }
+
+  DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+    exec.fence(
+        "Kokkos::Impl::DeepCopy<HostSpace, HostSpace, "
+        "ExecutionSpace>::DeepCopy: fence before copy");
+    hostspace_parallel_deepcopy_async(dst, src, n);
+  }
+};
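+
+// Illustrative note (sketch, not upstream documentation): these Impl
+// specializations back the public Kokkos::deep_copy for host-to-host
+// copies, e.g.
+//
+//   Kokkos::deep_copy(exec, dst_view, src_view);
+//
+// reaches hostspace_parallel_deepcopy_async on the asynchronous path.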
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+#endif  // #ifndef KOKKOS_HOSTSPACE_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Layout.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Layout.hpp
new file mode 100644 (file)
index 0000000..78173c0
--- /dev/null
@@ -0,0 +1,394 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/// \file Kokkos_Layout.hpp
+/// \brief Declaration of various \c MemoryLayout options.
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_LAYOUT_HPP
+#define KOKKOS_LAYOUT_HPP
+
+#include <cstddef>
+#include <impl/Kokkos_Traits.hpp>
+
+namespace Kokkos {
+
+enum { ARRAY_LAYOUT_MAX_RANK = 8 };
+
+//----------------------------------------------------------------------------
+/// \struct LayoutLeft
+/// \brief Memory layout tag indicating left-to-right (Fortran scheme)
+///   striding of multi-indices.
+///
+/// This is an example of a \c MemoryLayout template parameter of
+/// View.  The memory layout describes how View maps from a
+/// multi-index (i0, i1, ..., ik) to a memory location.
+///
+/// "Layout left" indicates a mapping where the leftmost index i0
+/// refers to contiguous access, and strides increase for dimensions
+/// going right from there (i1, i2, ...).  This layout imitates how
+/// Fortran stores multi-dimensional arrays.  For the special case of
+/// a two-dimensional array, "layout left" is also called "column
+/// major."
+struct LayoutLeft {
+  //! Tag this class as a kokkos array layout
+  using array_layout = LayoutLeft;
+
+  size_t dimension[ARRAY_LAYOUT_MAX_RANK];
+
+  enum : bool { is_extent_constructible = true };
+
+  LayoutLeft(LayoutLeft const&) = default;
+  LayoutLeft(LayoutLeft&&)      = default;
+  LayoutLeft& operator=(LayoutLeft const&) = default;
+  LayoutLeft& operator=(LayoutLeft&&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  explicit constexpr LayoutLeft(size_t N0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                size_t N1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                size_t N2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                size_t N3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                size_t N4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                size_t N5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                size_t N6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                size_t N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
+      : dimension{N0, N1, N2, N3, N4, N5, N6, N7} {}
+
+  friend bool operator==(const LayoutLeft& left, const LayoutLeft& right) {
+    for (unsigned int rank = 0; rank < ARRAY_LAYOUT_MAX_RANK; ++rank)
+      if (left.dimension[rank] != right.dimension[rank]) return false;
+    return true;
+  }
+
+  friend bool operator!=(const LayoutLeft& left, const LayoutLeft& right) {
+    return !(left == right);
+  }
+};
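+
+// Illustrative sketch (not upstream code): for a rank-2 extent {M, N},
+// LayoutLeft maps (i0, i1) to offset i0 + M * i1 (ignoring padding), i.e.
+// column-major:
+//
+//   Kokkos::View<double**, Kokkos::LayoutLeft> a("a", M, N);
+//   // &a(i0 + 1, i1) == &a(i0, i1) + 1  -- stride 1 in the left index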
+
+//----------------------------------------------------------------------------
+/// \struct LayoutRight
+/// \brief Memory layout tag indicating right-to-left (C or
+///   lexicographical scheme) striding of multi-indices.
+///
+/// This is an example of a \c MemoryLayout template parameter of
+/// View.  The memory layout describes how View maps from a
+/// multi-index (i0, i1, ..., ik) to a memory location.
+///
+/// "Right layout" indicates a mapping where the rightmost index ik
+/// refers to contiguous access, and strides increase for dimensions
+/// going left from there.  This layout imitates how C stores
+/// multi-dimensional arrays.  For the special case of a
+/// two-dimensional array, "layout right" is also called "row major."
+struct LayoutRight {
+  //! Tag this class as a kokkos array layout
+  using array_layout = LayoutRight;
+
+  size_t dimension[ARRAY_LAYOUT_MAX_RANK];
+
+  enum : bool { is_extent_constructible = true };
+
+  LayoutRight(LayoutRight const&) = default;
+  LayoutRight(LayoutRight&&)      = default;
+  LayoutRight& operator=(LayoutRight const&) = default;
+  LayoutRight& operator=(LayoutRight&&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  explicit constexpr LayoutRight(size_t N0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                 size_t N1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                 size_t N2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                 size_t N3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                 size_t N4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                 size_t N5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                 size_t N6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+                                 size_t N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
+      : dimension{N0, N1, N2, N3, N4, N5, N6, N7} {}
+
+  friend bool operator==(const LayoutRight& left, const LayoutRight& right) {
+    for (unsigned int rank = 0; rank < ARRAY_LAYOUT_MAX_RANK; ++rank)
+      if (left.dimension[rank] != right.dimension[rank]) return false;
+    return true;
+  }
+
+  friend bool operator!=(const LayoutRight& left, const LayoutRight& right) {
+    return !(left == right);
+  }
+};
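+
+// Illustrative sketch (not upstream code): for a rank-2 extent {M, N},
+// LayoutRight maps (i0, i1) to offset i0 * N + i1 (ignoring padding), i.e.
+// row-major:
+//
+//   Kokkos::View<double**, Kokkos::LayoutRight> a("a", M, N);
+//   // &a(i0, i1 + 1) == &a(i0, i1) + 1  -- stride 1 in the right index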
+
+//----------------------------------------------------------------------------
+/// \struct LayoutStride
+/// \brief  Memory layout tag indicating arbitrarily strided
+///         multi-index mapping into contiguous memory.
+struct LayoutStride {
+  //! Tag this class as a kokkos array layout
+  using array_layout = LayoutStride;
+
+  size_t dimension[ARRAY_LAYOUT_MAX_RANK];
+  size_t stride[ARRAY_LAYOUT_MAX_RANK];
+
+  enum : bool { is_extent_constructible = false };
+
+  LayoutStride(LayoutStride const&) = default;
+  LayoutStride(LayoutStride&&)      = default;
+  LayoutStride& operator=(LayoutStride const&) = default;
+  LayoutStride& operator=(LayoutStride&&) = default;
+
+  /** \brief  Compute strides from ordered dimensions.
+   *
+   *  Values of order uniquely form the set [0..rank)
+   *  and specify ordering of the dimensions.
+   *  Order = {0,1,2,...} is LayoutLeft
+   *  Order = {...,2,1,0} is LayoutRight
+   */
+  template <typename iTypeOrder, typename iTypeDimen>
+  KOKKOS_INLINE_FUNCTION static LayoutStride order_dimensions(
+      int const rank, iTypeOrder const* const order,
+      iTypeDimen const* const dimen) {
+    LayoutStride tmp;
+    // Verify valid rank order:
+    int check_input = ARRAY_LAYOUT_MAX_RANK < rank ? 0 : int(1 << rank) - 1;
+    for (int r = 0; r < ARRAY_LAYOUT_MAX_RANK; ++r) {
+      tmp.dimension[r] = KOKKOS_IMPL_CTOR_DEFAULT_ARG;
+      tmp.stride[r]    = 0;
+    }
+    for (int r = 0; r < rank; ++r) {
+      check_input &= ~int(1 << order[r]);
+    }
+    if (0 == check_input) {
+      size_t n = 1;
+      for (int r = 0; r < rank; ++r) {
+        tmp.stride[order[r]] = n;
+        n *= (dimen[order[r]]);
+        tmp.dimension[r] = dimen[r];
+      }
+    }
+    return tmp;
+  }
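+
+  // Illustrative usage sketch (not upstream code): build a rank-3 layout
+  // whose middle index strides fastest, i.e. order {1, 0, 2}:
+  //
+  //   int    order[3] = {1, 0, 2};
+  //   size_t dims[3]  = {4, 5, 6};
+  //   LayoutStride layout = LayoutStride::order_dimensions(3, order, dims);
+  //   // layout.dimension == {4, 5, 6}, layout.stride == {5, 1, 20}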
+
+  KOKKOS_INLINE_FUNCTION
+  explicit constexpr LayoutStride(
+      size_t N0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG, size_t S0 = 0,
+      size_t N1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG, size_t S1 = 0,
+      size_t N2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG, size_t S2 = 0,
+      size_t N3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG, size_t S3 = 0,
+      size_t N4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG, size_t S4 = 0,
+      size_t N5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG, size_t S5 = 0,
+      size_t N6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG, size_t S6 = 0,
+      size_t N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG, size_t S7 = 0)
+      : dimension{N0, N1, N2, N3, N4, N5, N6, N7}, stride{S0, S1, S2, S3,
+                                                          S4, S5, S6, S7} {}
+
+  friend bool operator==(const LayoutStride& left, const LayoutStride& right) {
+    for (unsigned int rank = 0; rank < ARRAY_LAYOUT_MAX_RANK; ++rank)
+      if (left.dimension[rank] != right.dimension[rank] ||
+          left.stride[rank] != right.stride[rank])
+        return false;
+    return true;
+  }
+
+  friend bool operator!=(const LayoutStride& left, const LayoutStride& right) {
+    return !(left == right);
+  }
+};
+
+//----------------------------------------------------------------------------
+
+enum class Iterate {
+  Default,
+  Left,  // Left indices stride fastest
+  Right  // Right indices stride fastest
+};
+
+// To check for LayoutTiled
+// This is to hide extra compile-time 'identifier' info within the LayoutTiled
+// class by not relying on template specialization to include the ArgN*'s
+template <typename LayoutTiledCheck, class Enable = void>
+struct is_layouttiled : std::false_type {};
+
+template <typename LayoutTiledCheck>
+struct is_layouttiled<LayoutTiledCheck,
+                      std::enable_if_t<LayoutTiledCheck::is_array_layout_tiled>>
+    : std::true_type {};
+
+namespace Experimental {
+
+/// LayoutTiled
+// Must have Rank >= 2
+template <
+    Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
+    unsigned ArgN1, unsigned ArgN2 = 0, unsigned ArgN3 = 0, unsigned ArgN4 = 0,
+    unsigned ArgN5 = 0, unsigned ArgN6 = 0, unsigned ArgN7 = 0,
+    bool IsPowerOfTwo =
+        (Kokkos::Impl::is_integral_power_of_two(ArgN0) &&
+         Kokkos::Impl::is_integral_power_of_two(ArgN1) &&
+         (Kokkos::Impl::is_integral_power_of_two(ArgN2) || (ArgN2 == 0)) &&
+         (Kokkos::Impl::is_integral_power_of_two(ArgN3) || (ArgN3 == 0)) &&
+         (Kokkos::Impl::is_integral_power_of_two(ArgN4) || (ArgN4 == 0)) &&
+         (Kokkos::Impl::is_integral_power_of_two(ArgN5) || (ArgN5 == 0)) &&
+         (Kokkos::Impl::is_integral_power_of_two(ArgN6) || (ArgN6 == 0)) &&
+         (Kokkos::Impl::is_integral_power_of_two(ArgN7) || (ArgN7 == 0)))>
+struct LayoutTiled {
+  static_assert(IsPowerOfTwo,
+                "LayoutTiled must be given power-of-two tile dimensions");
+
+  using array_layout = LayoutTiled<OuterP, InnerP, ArgN0, ArgN1, ArgN2, ArgN3,
+                                   ArgN4, ArgN5, ArgN6, ArgN7, IsPowerOfTwo>;
+  static constexpr Iterate outer_pattern = OuterP;
+  static constexpr Iterate inner_pattern = InnerP;
+
+  enum { N0 = ArgN0 };
+  enum { N1 = ArgN1 };
+  enum { N2 = ArgN2 };
+  enum { N3 = ArgN3 };
+  enum { N4 = ArgN4 };
+  enum { N5 = ArgN5 };
+  enum { N6 = ArgN6 };
+  enum { N7 = ArgN7 };
+
+  size_t dimension[ARRAY_LAYOUT_MAX_RANK];
+
+  enum : bool { is_extent_constructible = true };
+
+  LayoutTiled(LayoutTiled const&) = default;
+  LayoutTiled(LayoutTiled&&)      = default;
+  LayoutTiled& operator=(LayoutTiled const&) = default;
+  LayoutTiled& operator=(LayoutTiled&&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  explicit constexpr LayoutTiled(size_t argN0 = 0, size_t argN1 = 0,
+                                 size_t argN2 = 0, size_t argN3 = 0,
+                                 size_t argN4 = 0, size_t argN5 = 0,
+                                 size_t argN6 = 0, size_t argN7 = 0)
+      : dimension{argN0, argN1, argN2, argN3, argN4, argN5, argN6, argN7} {}
+
+  friend bool operator==(const LayoutTiled& left, const LayoutTiled& right) {
+    for (unsigned int rank = 0; rank < ARRAY_LAYOUT_MAX_RANK; ++rank)
+      if (left.dimension[rank] != right.dimension[rank]) return false;
+    return true;
+  }
+
+  friend bool operator!=(const LayoutTiled& left, const LayoutTiled& right) {
+    return !(left == right);
+  }
+};
+
+}  // namespace Experimental
+
+// For use with view_copy
+template <typename... Layout>
+struct layout_iterate_type_selector {
+  static const Kokkos::Iterate outer_iteration_pattern =
+      Kokkos::Iterate::Default;
+  static const Kokkos::Iterate inner_iteration_pattern =
+      Kokkos::Iterate::Default;
+};
+
+template <>
+struct layout_iterate_type_selector<Kokkos::LayoutRight> {
+  static const Kokkos::Iterate outer_iteration_pattern = Kokkos::Iterate::Right;
+  static const Kokkos::Iterate inner_iteration_pattern = Kokkos::Iterate::Right;
+};
+
+template <>
+struct layout_iterate_type_selector<Kokkos::LayoutLeft> {
+  static const Kokkos::Iterate outer_iteration_pattern = Kokkos::Iterate::Left;
+  static const Kokkos::Iterate inner_iteration_pattern = Kokkos::Iterate::Left;
+};
+
+template <>
+struct layout_iterate_type_selector<Kokkos::LayoutStride> {
+  static const Kokkos::Iterate outer_iteration_pattern =
+      Kokkos::Iterate::Default;
+  static const Kokkos::Iterate inner_iteration_pattern =
+      Kokkos::Iterate::Default;
+};
+
+template <unsigned ArgN0, unsigned ArgN1, unsigned ArgN2, unsigned ArgN3,
+          unsigned ArgN4, unsigned ArgN5, unsigned ArgN6, unsigned ArgN7>
+struct layout_iterate_type_selector<Kokkos::Experimental::LayoutTiled<
+    Kokkos::Iterate::Left, Kokkos::Iterate::Left, ArgN0, ArgN1, ArgN2, ArgN3,
+    ArgN4, ArgN5, ArgN6, ArgN7, true>> {
+  static const Kokkos::Iterate outer_iteration_pattern = Kokkos::Iterate::Left;
+  static const Kokkos::Iterate inner_iteration_pattern = Kokkos::Iterate::Left;
+};
+
+template <unsigned ArgN0, unsigned ArgN1, unsigned ArgN2, unsigned ArgN3,
+          unsigned ArgN4, unsigned ArgN5, unsigned ArgN6, unsigned ArgN7>
+struct layout_iterate_type_selector<Kokkos::Experimental::LayoutTiled<
+    Kokkos::Iterate::Right, Kokkos::Iterate::Left, ArgN0, ArgN1, ArgN2, ArgN3,
+    ArgN4, ArgN5, ArgN6, ArgN7, true>> {
+  static const Kokkos::Iterate outer_iteration_pattern = Kokkos::Iterate::Right;
+  static const Kokkos::Iterate inner_iteration_pattern = Kokkos::Iterate::Left;
+};
+
+template <unsigned ArgN0, unsigned ArgN1, unsigned ArgN2, unsigned ArgN3,
+          unsigned ArgN4, unsigned ArgN5, unsigned ArgN6, unsigned ArgN7>
+struct layout_iterate_type_selector<Kokkos::Experimental::LayoutTiled<
+    Kokkos::Iterate::Left, Kokkos::Iterate::Right, ArgN0, ArgN1, ArgN2, ArgN3,
+    ArgN4, ArgN5, ArgN6, ArgN7, true>> {
+  static const Kokkos::Iterate outer_iteration_pattern = Kokkos::Iterate::Left;
+  static const Kokkos::Iterate inner_iteration_pattern = Kokkos::Iterate::Right;
+};
+
+template <unsigned ArgN0, unsigned ArgN1, unsigned ArgN2, unsigned ArgN3,
+          unsigned ArgN4, unsigned ArgN5, unsigned ArgN6, unsigned ArgN7>
+struct layout_iterate_type_selector<Kokkos::Experimental::LayoutTiled<
+    Kokkos::Iterate::Right, Kokkos::Iterate::Right, ArgN0, ArgN1, ArgN2, ArgN3,
+    ArgN4, ArgN5, ArgN6, ArgN7, true>> {
+  static const Kokkos::Iterate outer_iteration_pattern = Kokkos::Iterate::Right;
+  static const Kokkos::Iterate inner_iteration_pattern = Kokkos::Iterate::Right;
+};
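+
+// Illustrative note (sketch): view_copy consults these traits at compile
+// time; e.g. layout_iterate_type_selector<Kokkos::LayoutRight>::
+// outer_iteration_pattern evaluates to Kokkos::Iterate::Right.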
+
+}  // namespace Kokkos
+
+#endif  // #ifndef KOKKOS_LAYOUT_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_LogicalSpaces.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_LogicalSpaces.hpp
new file mode 100644 (file)
index 0000000..d3ce354
--- /dev/null
@@ -0,0 +1,445 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_LOGICALSPACES_HPP
+#define KOKKOS_LOGICALSPACES_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <impl/Kokkos_MemorySpace.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+#include <cstring>
+namespace Kokkos {
+namespace Experimental {
+struct DefaultMemorySpaceNamer {
+  static constexpr const char* get_name() {
+    return "DefaultLogicalMemorySpaceName";
+  }
+};
+
+struct LogicalSpaceSharesAccess {
+  struct shared_access {};
+  struct no_shared_access {};
+};
+
+/// \class LogicalMemorySpace
+/// \brief A memory space identical to a base space but distinguishable
+///   from it by name and template arguments.
+///
+/// LogicalMemorySpace is a space that is identical to another space,
+/// but differentiable by name and template arguments.
+template <class BaseSpace, class DefaultBaseExecutionSpace = void,
+          class Namer                = DefaultMemorySpaceNamer,
+          class SharesAccessWithBase = LogicalSpaceSharesAccess::shared_access>
+class LogicalMemorySpace {
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+  // [DZP] For a reason not yet understood, using LogicalMemorySpaces
+  // inside an OpenMPTarget build causes errors in the
+  // SharedAllocationRecords of other types. This static_assert fails the
+  // build if a LogicalMemorySpace is instantiated in an OpenMPTarget build.
+  static_assert(!std::is_same<BaseSpace, BaseSpace>::value,
+                "Can't use LogicalMemorySpaces in an OpenMPTarget build, we're "
+                "debugging memory issues");
+#endif
+ public:
+  //! Tag this class as a kokkos memory space
+  using memory_space = LogicalMemorySpace<BaseSpace, DefaultBaseExecutionSpace,
+                                          Namer, SharesAccessWithBase>;
+  using size_type    = typename BaseSpace::size_type;
+
+  /// \typedef execution_space
+  /// \brief Default execution space for this memory space.
+  ///
+  /// Every memory space has a default execution space.  This is
+  /// useful for things like initializing a View (which happens in
+  /// parallel using the View's default execution space).
+
+  using execution_space =
+      std::conditional_t<std::is_void<DefaultBaseExecutionSpace>::value,
+                         typename BaseSpace::execution_space,
+                         DefaultBaseExecutionSpace>;
+
+  using device_type = Kokkos::Device<execution_space, memory_space>;
+
+  LogicalMemorySpace() = default;
+
+  template <typename... Args>
+  LogicalMemorySpace(Args&&... args) : underlying_space((Args &&) args...) {}
+
+  /**\brief  Allocate untracked memory in the space */
+  void* allocate(const size_t arg_alloc_size) const {
+    return allocate("[unlabeled]", arg_alloc_size);
+  }
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const {
+    return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+  }
+
+  /**\brief  Deallocate untracked memory in the space */
+  void deallocate(void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size) const {
+    deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+  }
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const {
+    impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+  }
+
+  /**\brief Return Name of the MemorySpace */
+  constexpr static const char* name() { return Namer::get_name(); }
+
+ private:
+  BaseSpace underlying_space;
+  template <class, class, class, class>
+  friend class LogicalMemorySpace;
+  friend class Kokkos::Impl::SharedAllocationRecord<memory_space, void>;
+
+  void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      Kokkos::Tools::SpaceHandle arg_handle =
+                          Kokkos::Tools::make_space_handle(name())) const {
+    return underlying_space.impl_allocate(arg_label, arg_alloc_size,
+                                          arg_logical_size, arg_handle);
+  }
+  void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                       const size_t arg_alloc_size,
+                       const size_t arg_logical_size = 0,
+                       const Kokkos::Tools::SpaceHandle arg_handle =
+                           Kokkos::Tools::make_space_handle(name())) const {
+    underlying_space.impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size,
+                                     arg_logical_size, arg_handle);
+  }
+};
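+
+// Illustrative usage sketch (ScratchNamer and MyScratchSpace are
+// hypothetical names, not upstream code): wrap HostSpace under a distinct
+// name so tools can attribute its allocations separately:
+//
+//   struct ScratchNamer {
+//     static constexpr const char* get_name() { return "MyScratch"; }
+//   };
+//   using MyScratchSpace =
+//       Kokkos::Experimental::LogicalMemorySpace<Kokkos::HostSpace, void,
+//                                                ScratchNamer>;
+//   MyScratchSpace space;
+//   void* p = space.allocate("tmp", 256);
+//   space.deallocate("tmp", p, 256);
+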
+}  // namespace Experimental
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <typename BaseSpace, typename DefaultBaseExecutionSpace, class Namer,
+          typename OtherSpace>
+struct MemorySpaceAccess<
+    Kokkos::Experimental::LogicalMemorySpace<
+        BaseSpace, DefaultBaseExecutionSpace, Namer,
+        Kokkos::Experimental::LogicalSpaceSharesAccess::shared_access>,
+    OtherSpace> {
+  enum { assignable = MemorySpaceAccess<BaseSpace, OtherSpace>::assignable };
+  enum { accessible = MemorySpaceAccess<BaseSpace, OtherSpace>::accessible };
+  enum { deepcopy = MemorySpaceAccess<BaseSpace, OtherSpace>::deepcopy };
+};
+
+template <typename BaseSpace, typename DefaultBaseExecutionSpace, class Namer,
+          typename OtherSpace>
+struct MemorySpaceAccess<
+    OtherSpace,
+    Kokkos::Experimental::LogicalMemorySpace<
+        BaseSpace, DefaultBaseExecutionSpace, Namer,
+        Kokkos::Experimental::LogicalSpaceSharesAccess::shared_access>> {
+  enum { assignable = MemorySpaceAccess<OtherSpace, BaseSpace>::assignable };
+  enum { accessible = MemorySpaceAccess<OtherSpace, BaseSpace>::accessible };
+  enum { deepcopy = MemorySpaceAccess<OtherSpace, BaseSpace>::deepcopy };
+};
+
+template <typename BaseSpace, typename DefaultBaseExecutionSpace, class Namer>
+struct MemorySpaceAccess<
+    Kokkos::Experimental::LogicalMemorySpace<
+        BaseSpace, DefaultBaseExecutionSpace, Namer,
+        Kokkos::Experimental::LogicalSpaceSharesAccess::shared_access>,
+    Kokkos::Experimental::LogicalMemorySpace<
+        BaseSpace, DefaultBaseExecutionSpace, Namer,
+        Kokkos::Experimental::LogicalSpaceSharesAccess::shared_access>> {
+  enum { assignable = true };
+  enum { accessible = true };
+  enum { deepcopy = true };
+};
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+template <class BaseSpace, class DefaultBaseExecutionSpace, class Namer,
+          class SharesAccessSemanticsWithBase>
+class SharedAllocationRecord<Kokkos::Experimental::LogicalMemorySpace<
+                                 BaseSpace, DefaultBaseExecutionSpace, Namer,
+                                 SharesAccessSemanticsWithBase>,
+                             void> : public SharedAllocationRecord<void, void> {
+ private:
+  using SpaceType =
+      Kokkos::Experimental::LogicalMemorySpace<BaseSpace,
+                                               DefaultBaseExecutionSpace, Namer,
+                                               SharesAccessSemanticsWithBase>;
+  using RecordBase = SharedAllocationRecord<void, void>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+  static void deallocate(RecordBase* arg_rec) {
+    delete static_cast<SharedAllocationRecord*>(arg_rec);
+  }
+
+#ifdef KOKKOS_ENABLE_DEBUG
+  /**\brief  Root record for tracked allocations from this
+   * LogicalMemorySpace instance */
+  static RecordBase s_root_record;
+#endif
+
+  const SpaceType m_space;
+
+ protected:
+  ~SharedAllocationRecord() {
+    m_space.deallocate(RecordBase::m_alloc_ptr->m_label,
+                       SharedAllocationRecord<void, void>::m_alloc_ptr,
+                       SharedAllocationRecord<void, void>::m_alloc_size,
+                       (SharedAllocationRecord<void, void>::m_alloc_size -
+                        sizeof(SharedAllocationHeader)));
+  }
+  SharedAllocationRecord() = default;
+
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /*exec_space*/, const SpaceType& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &deallocate)
+      : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
+                               arg_dealloc) {}
+
+  SharedAllocationRecord(
+      const SpaceType& arg_space, const std::string& arg_label,
+      const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &deallocate)
+      : SharedAllocationRecord<void, void>(
+#ifdef KOKKOS_ENABLE_DEBUG
+            &SharedAllocationRecord<SpaceType, void>::s_root_record,
+#endif
+            Impl::checked_allocation_with_header(arg_space, arg_label,
+                                                 arg_alloc_size),
+            sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+            arg_label),
+        m_space(arg_space) {
+    // Fill in the Header information
+    RecordBase::m_alloc_ptr->m_record =
+        static_cast<SharedAllocationRecord<void, void>*>(this);
+
+    strncpy(RecordBase::m_alloc_ptr->m_label, arg_label.c_str(),
+            SharedAllocationHeader::maximum_label_length - 1);
+    // Set last element zero, in case c_str is too long
+    RecordBase::m_alloc_ptr
+        ->m_label[SharedAllocationHeader::maximum_label_length - 1] = '\0';
+  }
+
+ public:
+  inline std::string get_label() const {
+    return std::string(RecordBase::head()->m_label);
+  }
+  KOKKOS_INLINE_FUNCTION static SharedAllocationRecord* allocate(
+      const SpaceType& arg_space, const std::string& arg_label,
+      const size_t arg_alloc_size) {
+    KOKKOS_IF_ON_HOST((return new SharedAllocationRecord(arg_space, arg_label,
+                                                         arg_alloc_size);))
+    KOKKOS_IF_ON_DEVICE(((void)arg_space; (void)arg_label; (void)arg_alloc_size;
+                         return nullptr;))
+  }
+
+  /**\brief  Allocate tracked memory in the space */
+  static void* allocate_tracked(const SpaceType& arg_space,
+                                const std::string& arg_label,
+                                const size_t arg_alloc_size) {
+    if (!arg_alloc_size) return (void*)nullptr;
+
+    SharedAllocationRecord* const r =
+        allocate(arg_space, arg_label, arg_alloc_size);
+
+    RecordBase::increment(r);
+
+    return r->data();
+  }
+
+  /**\brief  Reallocate tracked memory in the space */
+  static void* reallocate_tracked(void* const arg_alloc_ptr,
+                                  const size_t arg_alloc_size) {
+    SharedAllocationRecord* const r_old = get_record(arg_alloc_ptr);
+    SharedAllocationRecord* const r_new =
+        allocate(r_old->m_space, r_old->get_label(), arg_alloc_size);
+
+    Kokkos::Impl::DeepCopy<SpaceType, SpaceType>(
+        r_new->data(), r_old->data(), std::min(r_old->size(), r_new->size()));
+    Kokkos::fence(
+        "SharedAllocationRecord<Kokkos::Experimental::LogicalMemorySpace, "
+        "void>::reallocate_tracked: fence after copying data");
+
+    RecordBase::increment(r_new);
+    RecordBase::decrement(r_old);
+
+    return r_new->data();
+  }
+  /**\brief  Deallocate tracked memory in the space */
+  static void deallocate_tracked(void* const arg_alloc_ptr) {
+    if (arg_alloc_ptr != nullptr) {
+      SharedAllocationRecord* const r = get_record(arg_alloc_ptr);
+
+      RecordBase::decrement(r);
+    }
+  }
+
+  static SharedAllocationRecord* get_record(void* alloc_ptr) {
+    using Header     = SharedAllocationHeader;
+    using RecordHost = SharedAllocationRecord<SpaceType, void>;
+
+    SharedAllocationHeader const* const head =
+        alloc_ptr ? Header::get_header(alloc_ptr)
+                  : (SharedAllocationHeader*)nullptr;
+    RecordHost* const record =
+        head ? static_cast<RecordHost*>(head->m_record) : (RecordHost*)nullptr;
+
+    if (!alloc_ptr || record->m_alloc_ptr != head) {
+      Kokkos::Impl::throw_runtime_exception(std::string(
+          "Kokkos::Impl::SharedAllocationRecord< LogicalMemorySpace<> , "
+          "void >::get_record ERROR"));
+    }
+
+    return record;
+  }
+#ifdef KOKKOS_ENABLE_DEBUG
+  static void print_records(std::ostream& s, const SpaceType&,
+                            bool detail = false) {
+    SharedAllocationRecord<void, void>::print_host_accessible_records(
+        s, "LogicalMemorySpace", &s_root_record, detail);
+  }
+#else
+  static void print_records(std::ostream&, const SpaceType&,
+                            bool detail = false) {
+    (void)detail;
+    throw_runtime_exception(
+        "SharedAllocationRecord<LogicalMemorySpace>::print_records only works "
+        "with KOKKOS_ENABLE_DEBUG enabled");
+  }
+#endif
+};
+#ifdef KOKKOS_ENABLE_DEBUG
+/**\brief  Root record for tracked allocations from this LogicalSpace
+ * instance */
+template <class BaseSpace, class DefaultBaseExecutionSpace, class Namer,
+          class SharesAccessSemanticsWithBase>
+SharedAllocationRecord<void, void>
+    SharedAllocationRecord<Kokkos::Experimental::LogicalMemorySpace<
+                               BaseSpace, DefaultBaseExecutionSpace, Namer,
+                               SharesAccessSemanticsWithBase>,
+                           void>::s_root_record;
+#endif
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <class Namer, class BaseSpace, class DefaultBaseExecutionSpace,
+          class SharesAccess, class ExecutionSpace>
+struct DeepCopy<Kokkos::Experimental::LogicalMemorySpace<
+                    BaseSpace, DefaultBaseExecutionSpace, Namer, SharesAccess>,
+                Kokkos::Experimental::LogicalMemorySpace<
+                    BaseSpace, DefaultBaseExecutionSpace, Namer, SharesAccess>,
+                ExecutionSpace> {
+  DeepCopy(void* dst, void* src, size_t n) {
+    DeepCopy<BaseSpace, BaseSpace, ExecutionSpace>(dst, src, n);
+  }
+  DeepCopy(const ExecutionSpace& exec, void* dst, void* src, size_t n) {
+    DeepCopy<BaseSpace, BaseSpace, ExecutionSpace>(exec, dst, src, n);
+  }
+};
+
+template <class Namer, class BaseSpace, class DefaultBaseExecutionSpace,
+          class SharesAccess, class ExecutionSpace, class SourceSpace>
+struct DeepCopy<SourceSpace,
+                Kokkos::Experimental::LogicalMemorySpace<
+                    BaseSpace, DefaultBaseExecutionSpace, Namer, SharesAccess>,
+                ExecutionSpace> {
+  DeepCopy(void* dst, void* src, size_t n) {
+    DeepCopy<SourceSpace, BaseSpace, ExecutionSpace>(dst, src, n);
+  }
+  DeepCopy(const ExecutionSpace& exec, void* dst, void* src, size_t n) {
+    DeepCopy<SourceSpace, BaseSpace, ExecutionSpace>(exec, dst, src, n);
+  }
+};
+
+template <class Namer, class BaseSpace, class DefaultBaseExecutionSpace,
+          class SharesAccess, class ExecutionSpace, class DestinationSpace>
+struct DeepCopy<Kokkos::Experimental::LogicalMemorySpace<
+                    BaseSpace, DefaultBaseExecutionSpace, Namer, SharesAccess>,
+                DestinationSpace, ExecutionSpace> {
+  DeepCopy(void* dst, void* src, size_t n) {
+    DeepCopy<BaseSpace, DestinationSpace, ExecutionSpace>(dst, src, n);
+  }
+  DeepCopy(const ExecutionSpace& exec, void* dst, void* src, size_t n) {
+    DeepCopy<BaseSpace, DestinationSpace, ExecutionSpace>(exec, dst, src, n);
+  }
+};
+}  // namespace Impl
+
+}  // namespace Kokkos
+#endif  // KOKKOS_LOGICALSPACES_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Macros.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Macros.hpp
new file mode 100644 (file)
index 0000000..9dbd2de
--- /dev/null
@@ -0,0 +1,682 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_MACROS_HPP
+#define KOKKOS_MACROS_HPP
+
+//----------------------------------------------------------------------------
+/** Pick up configure / build options via #define macros:
+ *
+ *  KOKKOS_ENABLE_CUDA                Kokkos::Cuda execution and memory spaces
+ *  KOKKOS_ENABLE_THREADS             Kokkos::Threads execution space
+ *  KOKKOS_ENABLE_HPX                 Kokkos::Experimental::HPX execution space
+ *  KOKKOS_ENABLE_OPENMP              Kokkos::OpenMP execution space
+ *  KOKKOS_ENABLE_OPENMPTARGET        Kokkos::Experimental::OpenMPTarget
+ *                                    execution space
+ *  KOKKOS_ENABLE_HIP                 Kokkos::Experimental::HIP execution space
+ *  KOKKOS_ENABLE_SYCL                Kokkos::Experimental::SYCL execution space
+ *  KOKKOS_ENABLE_HWLOC               HWLOC library is available.
+ *  KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK  Insert array bounds checks, is expensive!
+ *  KOKKOS_ENABLE_CUDA_UVM            Use CUDA UVM for Cuda memory space.
+ */
+
+#ifndef KOKKOS_DONT_INCLUDE_CORE_CONFIG_H
+#include <KokkosCore_config.h>
+#endif
+
+//----------------------------------------------------------------------------
+/** Pick up compiler specific #define macros:
+ *
+ *  Macros for known compilers evaluate to an integral version value
+ *
+ *  KOKKOS_COMPILER_NVCC
+ *  KOKKOS_COMPILER_GNU
+ *  KOKKOS_COMPILER_INTEL
+ *  KOKKOS_COMPILER_IBM
+ *  KOKKOS_COMPILER_CRAYC
+ *  KOKKOS_COMPILER_APPLECC
+ *  KOKKOS_COMPILER_CLANG
+ *  KOKKOS_COMPILER_PGI
+ *  KOKKOS_COMPILER_MSVC
+ *
+ *  Macros for which compiler extension to use for atomics on intrinsic types
+ *
+ *  KOKKOS_ENABLE_CUDA_ATOMICS
+ *  KOKKOS_ENABLE_GNU_ATOMICS
+ *  KOKKOS_ENABLE_INTEL_ATOMICS
+ *  KOKKOS_ENABLE_OPENMP_ATOMICS
+ *
+ *  A suite of 'KOKKOS_ENABLE_PRAGMA_...' are defined for internal use.
+ *
+ *  Macros for marking functions to run in an execution space:
+ *
+ *  KOKKOS_FUNCTION
+ *  KOKKOS_INLINE_FUNCTION        request compiler to inline
+ *  KOKKOS_FORCEINLINE_FUNCTION   force compiler to inline, use with care!
+ */
+
+//----------------------------------------------------------------------------
+
+#if !defined(KOKKOS_ENABLE_THREADS) && !defined(KOKKOS_ENABLE_CUDA) &&     \
+    !defined(KOKKOS_ENABLE_OPENMP) && !defined(KOKKOS_ENABLE_HPX) &&       \
+    !defined(KOKKOS_ENABLE_OPENMPTARGET) && !defined(KOKKOS_ENABLE_HIP) && \
+    !defined(KOKKOS_ENABLE_SYCL)
+#define KOKKOS_INTERNAL_NOT_PARALLEL
+#endif
+
+#define KOKKOS_ENABLE_CXX11_DISPATCH_LAMBDA
+
+#include <KokkosCore_Config_SetupBackend.hpp>
+
+//----------------------------------------------------------------------------
+// Mapping compiler built-ins to KOKKOS_COMPILER_*** macros
+
+#if defined(__NVCC__)
+// NVIDIA compiler is being used.
+// Code is parsed and separated into host and device code.
+// Host code is compiled again with another compiler.
+// Device code is compiled to 'ptx'.
+#define KOKKOS_COMPILER_NVCC __NVCC__
+#endif  // #if defined( __NVCC__ )
+
+#if !defined(KOKKOS_LAMBDA)
+#define KOKKOS_LAMBDA [=]
+#endif
+
+#if (defined(KOKKOS_ENABLE_CXX17) || defined(KOKKOS_ENABLE_CXX20)) && \
+    !defined(KOKKOS_CLASS_LAMBDA)
+#define KOKKOS_CLASS_LAMBDA [ =, *this ]
+#endif
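+
+// Usage sketch (editorial note, not upstream Kokkos code): KOKKOS_LAMBDA is
+// the capture clause to use for lambdas passed to parallel dispatch; backend
+// setup headers may redefine it to add device annotations. The view name and
+// extent below are illustrative.
+//
+//   Kokkos::View<double*> v("v", 100);
+//   Kokkos::parallel_for(
+//       100, KOKKOS_LAMBDA(const int i) { v(i) = 2.0 * i; });
+//
+// KOKKOS_CLASS_LAMBDA additionally captures *this by value, which is needed
+// when a kernel body written inside a member function references class
+// members.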
+
+//#if !defined( __CUDA_ARCH__ ) // Not compiling Cuda code to 'ptx'.
+
+// Intel compiler for host code.
+
+#if defined(__INTEL_COMPILER)
+#define KOKKOS_COMPILER_INTEL __INTEL_COMPILER
+#elif defined(__INTEL_LLVM_COMPILER)
+#define KOKKOS_COMPILER_INTEL __INTEL_LLVM_COMPILER
+#elif defined(__ICC)
+// Old define
+#define KOKKOS_COMPILER_INTEL __ICC
+#elif defined(__ECC)
+// Very old define
+#define KOKKOS_COMPILER_INTEL __ECC
+#endif
+
+// CRAY compiler for host code
+#if defined(_CRAYC)
+#define KOKKOS_COMPILER_CRAYC _CRAYC
+#endif
+
+#if defined(__IBMCPP__)
+// IBM C++
+#define KOKKOS_COMPILER_IBM __IBMCPP__
+#elif defined(__IBMC__)
+#define KOKKOS_COMPILER_IBM __IBMC__
+#elif defined(__ibmxl_vrm__)  // xlclang++
+#define KOKKOS_COMPILER_IBM __ibmxl_vrm__
+#endif
+
+#if defined(__APPLE_CC__)
+#define KOKKOS_COMPILER_APPLECC __APPLE_CC__
+#endif
+
+#if defined(__clang__) && !defined(KOKKOS_COMPILER_INTEL) && \
+    !defined(KOKKOS_COMPILER_IBM)
+#define KOKKOS_COMPILER_CLANG \
+  __clang_major__ * 100 + __clang_minor__ * 10 + __clang_patchlevel__
+#endif
+
+#if !defined(__clang__) && !defined(KOKKOS_COMPILER_INTEL) && defined(__GNUC__)
+#define KOKKOS_COMPILER_GNU \
+  __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__
+
+#if (530 > KOKKOS_COMPILER_GNU)
+#error "Compiling with GCC version earlier than 5.3.0 is not supported."
+#endif
+#endif
+
+#if defined(__PGIC__)
+#define KOKKOS_COMPILER_PGI \
+  __PGIC__ * 100 + __PGIC_MINOR__ * 10 + __PGIC_PATCHLEVEL__
+
+#if (1740 > KOKKOS_COMPILER_PGI)
+#error "Compiling with PGI version earlier than 17.4 is not supported."
+#endif
+#endif
+
+#if defined(__NVCOMPILER)
+#define KOKKOS_COMPILER_NVHPC                              \
+  __NVCOMPILER_MAJOR__ * 100 + __NVCOMPILER_MINOR__ * 10 + \
+      __NVCOMPILER_PATCHLEVEL__
+#endif
+
+#if defined(_MSC_VER) && !defined(KOKKOS_COMPILER_INTEL)
+#define KOKKOS_COMPILER_MSVC _MSC_VER
+#endif
+
+#if defined(_OPENMP)
+//  Compiling with OpenMP.
+//  The value of _OPENMP is an integer value YYYYMM
+//  where YYYY and MM are the year and month designation
+//  of the supported OpenMP API version.
+#endif  // #if defined( _OPENMP )
+
+#if defined(KOKKOS_ENABLE_CXX17)
+#define KOKKOS_IMPL_FALLTHROUGH [[fallthrough]];
+#elif defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU >= 710)
+#define KOKKOS_IMPL_FALLTHROUGH [[gnu::fallthrough]];
+#elif defined(KOKKOS_COMPILER_CLANG)
+#define KOKKOS_IMPL_FALLTHROUGH [[clang::fallthrough]];
+#else
+#define KOKKOS_IMPL_FALLTHROUGH
+#endif
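+
+// Usage sketch (editorial note, not upstream Kokkos code):
+// KOKKOS_IMPL_FALLTHROUGH annotates a deliberate fall-through so compilers
+// that support the attribute do not warn. The macro expands with a trailing
+// semicolon, hence no ';' at the use site:
+//
+//   switch (n) {
+//     case 0: ++count; KOKKOS_IMPL_FALLTHROUGH
+//     case 1: ++count; break;
+//   }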
+
+//----------------------------------------------------------------------------
+// Intel compiler macros
+
+#if defined(KOKKOS_COMPILER_INTEL)
+// FIXME_SYCL
+#if !defined(KOKKOS_ENABLE_SYCL)
+#define KOKKOS_ENABLE_PRAGMA_UNROLL 1
+#define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
+#define KOKKOS_ENABLE_PRAGMA_VECTOR 1
+#endif
+#if (1800 > KOKKOS_COMPILER_INTEL)
+#define KOKKOS_ENABLE_PRAGMA_SIMD 1
+#endif
+
+// FIXME Workaround for ICE with intel 17,18,19,20,21 in Trilinos
+#if (KOKKOS_COMPILER_INTEL <= 2100)
+#define KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
+#endif
+
+// FIXME_SYCL
+#if !defined(KOKKOS_ENABLE_SYCL)
+#define KOKKOS_ENABLE_PRAGMA_IVDEP 1
+#endif
+
+#if !defined(KOKKOS_MEMORY_ALIGNMENT)
+#define KOKKOS_MEMORY_ALIGNMENT 64
+#endif
+
+#if defined(_WIN32)
+#define KOKKOS_RESTRICT __restrict
+#else
+#define KOKKOS_RESTRICT __restrict__
+#endif
+
+#ifndef KOKKOS_IMPL_ALIGN_PTR
+#if defined(_WIN32)
+#define KOKKOS_IMPL_ALIGN_PTR(size) __declspec(align_value(size))
+#else
+#define KOKKOS_IMPL_ALIGN_PTR(size) __attribute__((align_value(size)))
+#endif
+#endif
+
+#if (1700 > KOKKOS_COMPILER_INTEL)
+#error "Compiling with Intel version earlier than 17.0 is not supported."
+#endif
+
+#if !defined(KOKKOS_ENABLE_ASM) && !defined(_WIN32)
+#define KOKKOS_ENABLE_ASM 1
+#endif
+
+#if !defined(KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION)
+#if !defined(_WIN32)
+#define KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION \
+  inline __attribute__((always_inline))
+#define KOKKOS_IMPL_HOST_FORCEINLINE __attribute__((always_inline))
+#else
+#define KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION inline
+#endif
+#endif
+
+#if defined(KOKKOS_ARCH_AVX512MIC)
+#define KOKKOS_ENABLE_RFO_PREFETCH 1
+#if (KOKKOS_COMPILER_INTEL < 1800) && !defined(KOKKOS_KNL_USE_ASM_WORKAROUND)
+#define KOKKOS_KNL_USE_ASM_WORKAROUND 1
+#endif
+#endif
+
+#if (1800 > KOKKOS_COMPILER_INTEL)
+#define KOKKOS_IMPL_INTEL_WORKAROUND_NOEXCEPT_SPECIFICATION_VIRTUAL_FUNCTION
+#endif
+
+#if defined(__MIC__)
+// Compiling for Xeon Phi
+#endif
+#endif
+
+//----------------------------------------------------------------------------
+// Cray compiler macros
+
+#if defined(KOKKOS_COMPILER_CRAYC)
+#endif
+
+//----------------------------------------------------------------------------
+// IBM Compiler macros
+
+#if defined(KOKKOS_COMPILER_IBM)
+#define KOKKOS_ENABLE_PRAGMA_UNROLL 1
+//#define KOKKOS_ENABLE_PRAGMA_IVDEP 1
+//#define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
+//#define KOKKOS_ENABLE_PRAGMA_VECTOR 1
+//#define KOKKOS_ENABLE_PRAGMA_SIMD 1
+
+#if !defined(KOKKOS_ENABLE_ASM)
+#define KOKKOS_ENABLE_ASM 1
+#endif
+#endif
+
+//----------------------------------------------------------------------------
+// CLANG compiler macros
+
+#if defined(KOKKOS_COMPILER_CLANG)
+//#define KOKKOS_ENABLE_PRAGMA_UNROLL 1
+//#define KOKKOS_ENABLE_PRAGMA_IVDEP 1
+//#define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
+//#define KOKKOS_ENABLE_PRAGMA_VECTOR 1
+//#define KOKKOS_ENABLE_PRAGMA_SIMD 1
+
+#if !defined(KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION)
+#define KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION \
+  inline __attribute__((always_inline))
+#define KOKKOS_IMPL_HOST_FORCEINLINE __attribute__((always_inline))
+#endif
+
+#if !defined(KOKKOS_IMPL_ALIGN_PTR)
+#define KOKKOS_IMPL_ALIGN_PTR(size) __attribute__((aligned(size)))
+#endif
+
+#endif
+
+//----------------------------------------------------------------------------
+// GNU Compiler macros
+
+#if defined(KOKKOS_COMPILER_GNU)
+//#define KOKKOS_ENABLE_PRAGMA_UNROLL 1
+//#define KOKKOS_ENABLE_PRAGMA_IVDEP 1
+//#define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
+//#define KOKKOS_ENABLE_PRAGMA_VECTOR 1
+//#define KOKKOS_ENABLE_PRAGMA_SIMD 1
+
+#if defined(KOKKOS_ARCH_AVX512MIC)
+#define KOKKOS_ENABLE_RFO_PREFETCH 1
+#endif
+
+#if !defined(KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION)
+#define KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION \
+  inline __attribute__((always_inline))
+#define KOKKOS_IMPL_HOST_FORCEINLINE __attribute__((always_inline))
+#endif
+
+#define KOKKOS_RESTRICT __restrict__
+
+#if !defined(KOKKOS_ENABLE_ASM) && !defined(__PGIC__) &&            \
+    (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || \
+     defined(__x86_64__) || defined(__PPC64__))
+#define KOKKOS_ENABLE_ASM 1
+#endif
+#endif
+
+//----------------------------------------------------------------------------
+
+#if defined(KOKKOS_COMPILER_PGI)
+#define KOKKOS_ENABLE_PRAGMA_UNROLL 1
+#define KOKKOS_ENABLE_PRAGMA_IVDEP 1
+//#define KOKKOS_ENABLE_PRAGMA_LOOPCOUNT 1
+#define KOKKOS_ENABLE_PRAGMA_VECTOR 1
+//#define KOKKOS_ENABLE_PRAGMA_SIMD 1
+#endif
+
+//----------------------------------------------------------------------------
+
+#if defined(KOKKOS_COMPILER_NVCC)
+#if defined(__CUDA_ARCH__)
+#define KOKKOS_ENABLE_PRAGMA_UNROLL 1
+#endif
+#endif
+
+//----------------------------------------------------------------------------
+// Define function marking macros if compiler specific macros are undefined:
+
+#if !defined(KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION)
+#define KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION inline
+#endif
+
+#if !defined(KOKKOS_IMPL_HOST_FORCEINLINE)
+#define KOKKOS_IMPL_HOST_FORCEINLINE inline
+#endif
+
+#if !defined(KOKKOS_IMPL_FORCEINLINE_FUNCTION)
+#define KOKKOS_IMPL_FORCEINLINE_FUNCTION KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+#endif
+
+#if !defined(KOKKOS_IMPL_FORCEINLINE)
+#define KOKKOS_IMPL_FORCEINLINE KOKKOS_IMPL_HOST_FORCEINLINE
+#endif
+
+#if !defined(KOKKOS_IMPL_INLINE_FUNCTION)
+#define KOKKOS_IMPL_INLINE_FUNCTION inline
+#endif
+
+#if !defined(KOKKOS_IMPL_FUNCTION)
+#define KOKKOS_IMPL_FUNCTION /**/
+#endif
+
+#if !defined(KOKKOS_INLINE_FUNCTION_DELETED)
+#define KOKKOS_INLINE_FUNCTION_DELETED inline
+#endif
+
+#if !defined(KOKKOS_DEFAULTED_FUNCTION)
+#define KOKKOS_DEFAULTED_FUNCTION inline
+#endif
+
+#if !defined(KOKKOS_IMPL_HOST_FUNCTION)
+#define KOKKOS_IMPL_HOST_FUNCTION
+#endif
+
+#if !defined(KOKKOS_IMPL_DEVICE_FUNCTION)
+#define KOKKOS_IMPL_DEVICE_FUNCTION
+#endif
+
+// Temporary solution for SYCL not supporting printf in kernels.
+// Might disappear at any point once we have found another solution.
+#if !defined(KOKKOS_IMPL_DO_NOT_USE_PRINTF)
+#define KOKKOS_IMPL_DO_NOT_USE_PRINTF(...) printf(__VA_ARGS__)
+#endif
+
+//----------------------------------------------------------------------------
+// Define the final versions of the function-marking macros. This is done so
+// that clang-tidy can find these macros more easily.
+#if defined(__clang_analyzer__)
+#define KOKKOS_FUNCTION \
+  KOKKOS_IMPL_FUNCTION __attribute__((annotate("KOKKOS_FUNCTION")))
+#define KOKKOS_INLINE_FUNCTION \
+  KOKKOS_IMPL_INLINE_FUNCTION  \
+  __attribute__((annotate("KOKKOS_INLINE_FUNCTION")))
+#define KOKKOS_FORCEINLINE_FUNCTION \
+  KOKKOS_IMPL_FORCEINLINE_FUNCTION  \
+  __attribute__((annotate("KOKKOS_FORCEINLINE_FUNCTION")))
+#else
+#define KOKKOS_FUNCTION KOKKOS_IMPL_FUNCTION
+#define KOKKOS_INLINE_FUNCTION KOKKOS_IMPL_INLINE_FUNCTION
+#define KOKKOS_FORCEINLINE_FUNCTION KOKKOS_IMPL_FORCEINLINE_FUNCTION
+#endif
+
+//----------------------------------------------------------------------------
+// Define empty macro for restrict if necessary:
+
+#if !defined(KOKKOS_RESTRICT)
+#define KOKKOS_RESTRICT
+#endif
+
+//----------------------------------------------------------------------------
+// Define Macro for alignment:
+
+#if !defined(KOKKOS_MEMORY_ALIGNMENT)
+#define KOKKOS_MEMORY_ALIGNMENT 64
+#endif
+
+#if !defined(KOKKOS_MEMORY_ALIGNMENT_THRESHOLD)
+#define KOKKOS_MEMORY_ALIGNMENT_THRESHOLD 1
+#endif
+
+#if !defined(KOKKOS_IMPL_ALIGN_PTR)
+#define KOKKOS_IMPL_ALIGN_PTR(size) /* */
+#endif
+
+//----------------------------------------------------------------------------
+// Determine the default execution space for parallel dispatch.
+// At most one default execution space may be specified.
+
+#if 1 < ((defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_CUDA) ? 1 : 0) +         \
+         (defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HIP) ? 1 : 0) +          \
+         (defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SYCL) ? 1 : 0) +         \
+         (defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMPTARGET) ? 1 : 0) + \
+         (defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMP) ? 1 : 0) +       \
+         (defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_THREADS) ? 1 : 0) +      \
+         (defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HPX) ? 1 : 0) +          \
+         (defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SERIAL) ? 1 : 0))
+#error "More than one KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_* specified."
+#endif
+
+// If no default is specified, then choose from the enabled execution spaces.
+// Priority: CUDA, HIP, SYCL, OPENMPTARGET, OPENMP, THREADS, HPX, SERIAL
+#if defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_CUDA)
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HIP)
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SYCL)
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMPTARGET)
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMP)
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_THREADS)
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HPX)
+#elif defined(KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SERIAL)
+#elif defined(KOKKOS_ENABLE_CUDA)
+#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_CUDA
+#elif defined(KOKKOS_ENABLE_HIP)
+#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HIP
+#elif defined(KOKKOS_ENABLE_SYCL)
+#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SYCL
+#elif defined(KOKKOS_ENABLE_OPENMPTARGET)
+#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMPTARGET
+#elif defined(KOKKOS_ENABLE_OPENMP)
+#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_OPENMP
+#elif defined(KOKKOS_ENABLE_THREADS)
+#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_THREADS
+#elif defined(KOKKOS_ENABLE_HPX)
+#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_HPX
+#else
+#define KOKKOS_ENABLE_DEFAULT_DEVICE_TYPE_SERIAL
+#endif
+
+//----------------------------------------------------------------------------
+// Determine for what space the code is being compiled:
+
+#if defined(__CUDACC__) && defined(__CUDA_ARCH__) && defined(KOKKOS_ENABLE_CUDA)
+#define KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_CUDA
+#elif defined(__SYCL_DEVICE_ONLY__) && defined(KOKKOS_ENABLE_SYCL)
+#define KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_SYCL
+#elif defined(__HIPCC__) && defined(__HIP_DEVICE_COMPILE__) && \
+    defined(KOKKOS_ENABLE_HIP)
+#define KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HIP_GPU
+#else
+#define KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST
+#endif
+
+//----------------------------------------------------------------------------
+
+// Remove surrounding parentheses if present
+#define KOKKOS_IMPL_STRIP_PARENS(X) KOKKOS_IMPL_ESC(KOKKOS_IMPL_ISH X)
+#define KOKKOS_IMPL_ISH(...) KOKKOS_IMPL_ISH __VA_ARGS__
+#define KOKKOS_IMPL_ESC(...) KOKKOS_IMPL_ESC_(__VA_ARGS__)
+#define KOKKOS_IMPL_ESC_(...) KOKKOS_IMPL_VAN_##__VA_ARGS__
+#define KOKKOS_IMPL_VAN_KOKKOS_IMPL_ISH
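+
+// Expansion sketch (editorial note, not upstream Kokkos code): for a
+// parenthesized argument such as X = (f();) the expansion proceeds as
+//
+//   KOKKOS_IMPL_ISH (f();)  -> KOKKOS_IMPL_ISH f();
+//   KOKKOS_IMPL_ESC_(...)   -> KOKKOS_IMPL_VAN_KOKKOS_IMPL_ISH f();
+//
+// and KOKKOS_IMPL_VAN_KOKKOS_IMPL_ISH expands to nothing, leaving f();
+// An unparenthesized X passes through unchanged by the same token pasting.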
+
+#if defined(KOKKOS_ENABLE_CUDA) && defined(KOKKOS_COMPILER_NVHPC)
+#include <nv/target>
+#define KOKKOS_IF_ON_DEVICE(CODE) NV_IF_TARGET(NV_IS_DEVICE, CODE)
+#define KOKKOS_IF_ON_HOST(CODE) NV_IF_TARGET(NV_IS_HOST, CODE)
+#endif
+
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+#ifdef KOKKOS_COMPILER_NVHPC
+#define KOKKOS_IF_ON_DEVICE(CODE)   \
+  if (__builtin_is_device_code()) { \
+    KOKKOS_IMPL_STRIP_PARENS(CODE)  \
+  }
+#define KOKKOS_IF_ON_HOST(CODE)      \
+  if (!__builtin_is_device_code()) { \
+    KOKKOS_IMPL_STRIP_PARENS(CODE)   \
+  }
+#else
+// Base function.
+static constexpr bool kokkos_omp_on_host() { return true; }
+
+#pragma omp begin declare variant match(device = {kind(host)})
+static constexpr bool kokkos_omp_on_host() { return true; }
+#pragma omp end declare variant
+
+#pragma omp begin declare variant match(device = {kind(nohost)})
+static constexpr bool kokkos_omp_on_host() { return false; }
+#pragma omp end declare variant
+
+#define KOKKOS_IF_ON_DEVICE(CODE)        \
+  if constexpr (!kokkos_omp_on_host()) { \
+    KOKKOS_IMPL_STRIP_PARENS(CODE)       \
+  }
+#define KOKKOS_IF_ON_HOST(CODE)         \
+  if constexpr (kokkos_omp_on_host()) { \
+    KOKKOS_IMPL_STRIP_PARENS(CODE)      \
+  }
+#endif
+#endif
+
+#if !defined(KOKKOS_IF_ON_HOST) && !defined(KOKKOS_IF_ON_DEVICE)
+#if (defined(KOKKOS_ENABLE_CUDA) && defined(__CUDA_ARCH__)) ||         \
+    (defined(KOKKOS_ENABLE_HIP) && defined(__HIP_DEVICE_COMPILE__)) || \
+    (defined(KOKKOS_ENABLE_SYCL) && defined(__SYCL_DEVICE_ONLY__))
+#define KOKKOS_IF_ON_DEVICE(CODE) \
+  { KOKKOS_IMPL_STRIP_PARENS(CODE) }
+#define KOKKOS_IF_ON_HOST(CODE) \
+  {}
+#else
+#define KOKKOS_IF_ON_DEVICE(CODE) \
+  {}
+#define KOKKOS_IF_ON_HOST(CODE) \
+  { KOKKOS_IMPL_STRIP_PARENS(CODE) }
+#endif
+#endif
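+
+// Usage sketch (editorial note, not upstream Kokkos code): these macros let
+// one KOKKOS_FUNCTION body select host-only or device-only statements
+// without #ifdefs at the call site; the surrounding parentheses are removed
+// by KOKKOS_IMPL_STRIP_PARENS above. The function name is illustrative.
+//
+//   KOKKOS_FUNCTION void report() {
+//     KOKKOS_IF_ON_HOST((printf("called on the host\n");))
+//     KOKKOS_IF_ON_DEVICE(
+//         (KOKKOS_IMPL_DO_NOT_USE_PRINTF("called on the device\n");))
+//   }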
+
+//----------------------------------------------------------------------------
+// If compiling with CUDA, we must use relocatable device code to enable the
+// task policy.
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#if defined(KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE)
+#define KOKKOS_ENABLE_TASKDAG
+#endif
+// FIXME_SYCL Tasks not implemented
+#elif !defined(KOKKOS_ENABLE_HIP) && !defined(KOKKOS_ENABLE_SYCL)
+#define KOKKOS_ENABLE_TASKDAG
+#endif
+
+#define KOKKOS_INVALID_INDEX (~std::size_t(0))
+
+#define KOKKOS_IMPL_CTOR_DEFAULT_ARG KOKKOS_INVALID_INDEX
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+#define KOKKOS_CONSTEXPR_14 constexpr
+#define KOKKOS_DEPRECATED_TRAILING_ATTRIBUTE
+#endif
+
+// Guard Intel compiler version 19 and older, which emit
+// "intel error #2651: attribute does not apply to any entity" on
+// "using <deprecated_type> KOKKOS_DEPRECATED = ..."
+#if defined(KOKKOS_ENABLE_DEPRECATION_WARNINGS) && !defined(__NVCC__) && \
+    (!defined(KOKKOS_COMPILER_INTEL) || KOKKOS_COMPILER_INTEL >= 2021)
+#define KOKKOS_DEPRECATED [[deprecated]]
+#define KOKKOS_DEPRECATED_WITH_COMMENT(comment) [[deprecated(comment)]]
+#else
+#define KOKKOS_DEPRECATED
+#define KOKKOS_DEPRECATED_WITH_COMMENT(comment)
+#endif
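+
+// Usage sketch (editorial note, not upstream Kokkos code); the declarations
+// are illustrative:
+//
+//   KOKKOS_DEPRECATED void old_api();
+//   KOKKOS_DEPRECATED_WITH_COMMENT("use new_api() instead") void older_api();
+//
+// Both compile to plain declarations unless deprecation warnings are enabled.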
+
+#define KOKKOS_IMPL_STRINGIFY(x) #x
+#define KOKKOS_IMPL_TOSTRING(x) KOKKOS_IMPL_STRINGIFY(x)
+
+#ifdef _MSC_VER
+#define KOKKOS_IMPL_DO_PRAGMA(x) __pragma(x)
+#define KOKKOS_IMPL_WARNING(desc) \
+  KOKKOS_IMPL_DO_PRAGMA(message(  \
+      __FILE__ "(" KOKKOS_IMPL_TOSTRING(__LINE__) ") : warning: " #desc))
+#else
+#define KOKKOS_IMPL_DO_PRAGMA(x) _Pragma(#x)
+#define KOKKOS_IMPL_WARNING(desc) KOKKOS_IMPL_DO_PRAGMA(message(#desc))
+#endif
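+
+// Usage sketch (editorial note, not upstream Kokkos code): emits a portable
+// compile-time warning, via _Pragma on GCC/Clang and __pragma on MSVC, e.g.
+//
+//   KOKKOS_IMPL_WARNING("This code path is deprecated.")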
+
+// DJS 05/28/2019: Bugfix: Issue 2155
+// Use KOKKOS_ENABLE_CUDA_LDG_INTRINSIC to avoid memory leak in RandomAccess
+// View
+#if defined(KOKKOS_ENABLE_CUDA) && !defined(KOKKOS_ENABLE_CUDA_LDG_INTRINSIC)
+#define KOKKOS_ENABLE_CUDA_LDG_INTRINSIC
+#endif
+
+#if defined(KOKKOS_ENABLE_CXX17) || defined(KOKKOS_ENABLE_CXX20)
+#define KOKKOS_ATTRIBUTE_NODISCARD [[nodiscard]]
+#else
+#define KOKKOS_ATTRIBUTE_NODISCARD
+#endif
+
+#if (defined(KOKKOS_COMPILER_GNU) || defined(KOKKOS_COMPILER_CLANG) ||  \
+     defined(KOKKOS_COMPILER_INTEL) || defined(KOKKOS_COMPILER_PGI)) && \
+    !defined(_WIN32)
+#define KOKKOS_IMPL_ENABLE_STACKTRACE
+#define KOKKOS_IMPL_ENABLE_CXXABI
+#endif
+
+// WORKAROUND for AMD aomp, which apparently defines __CUDA_ARCH__ when
+// building for AMD GPUs with OpenMP Target
+#if defined(__CUDA_ARCH__) && !defined(__CUDACC__) && \
+    !defined(KOKKOS_ENABLE_HIP) && !defined(KOKKOS_ENABLE_CUDA)
+#undef __CUDA_ARCH__
+#endif
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+#define KOKKOS_THREAD_LOCAL \
+  KOKKOS_DEPRECATED_WITH_COMMENT("Use thread_local instead!") thread_local
+#endif
+
+#if (defined(KOKKOS_IMPL_WINDOWS_CUDA) || defined(KOKKOS_COMPILER_MSVC)) && \
+    !defined(KOKKOS_COMPILER_CLANG)
+// MSVC (as of 16.5.5 at least) does not do empty base class optimization by
+// default when there are multiple bases, even though the standard requires it
+// for standard layout types.
+#define KOKKOS_IMPL_ENFORCE_EMPTY_BASE_OPTIMIZATION __declspec(empty_bases)
+#else
+#define KOKKOS_IMPL_ENFORCE_EMPTY_BASE_OPTIMIZATION
+#endif
+
+#endif  // #ifndef KOKKOS_MACROS_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_MasterLock.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_MasterLock.hpp
new file mode 100644 (file)
index 0000000..5e48595
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_MASTER_LOCK_HPP
+#define KOKKOS_MASTER_LOCK_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+
+namespace Kokkos {
+namespace Experimental {
+
+// may be used to coordinate work between master instances
+// SHOULD NOT be used within a parallel algorithm
+//
+// This lock should be used with a scoped lock guard,
+// i.e. std::unique_lock<Lock> or std::lock_guard<Lock>
+//
+// cannot be copied or moved
+// has the following functions available
+//
+// Lock()
+// ~Lock()
+//
+// void lock()
+// void unlock()
+// bool try_lock()
+//
+template <typename ExecutionSpace>
+class MasterLock;
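+
+// Usage sketch (editorial note, not upstream Kokkos code): a backend that
+// provides a MasterLock specialization is driven through a standard scoped
+// guard; the execution space below is illustrative.
+//
+//   Kokkos::Experimental::MasterLock<Kokkos::OpenMP> lock;
+//   {
+//     std::lock_guard<decltype(lock)> guard(lock);
+//     // ... work coordinated between master instances ...
+//   }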
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
+
+#endif  // KOKKOS_MASTER_LOCK_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_MathematicalConstants.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_MathematicalConstants.hpp
new file mode 100644 (file)
index 0000000..8f7b559
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+#ifndef KOKKOS_MATHEMATICAL_CONSTANTS_HPP
+#define KOKKOS_MATHEMATICAL_CONSTANTS_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHCONSTANTS
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <type_traits>
+
+namespace Kokkos {
+namespace Experimental {
+
+#if defined(KOKKOS_ENABLE_CXX17)
+#define KOKKOS_IMPL_MATH_CONSTANT(TRAIT, VALUE) \
+  template <class T>                            \
+  inline constexpr auto TRAIT##_v =             \
+      std::enable_if_t<std::is_floating_point_v<T>, T>(VALUE)
+#else
+#define KOKKOS_IMPL_MATH_CONSTANT(TRAIT, VALUE) \
+  template <class T>                            \
+  constexpr auto TRAIT##_v =                    \
+      std::enable_if_t<std::is_floating_point<T>::value, T>(VALUE)
+#endif
+
+// clang-format off
+KOKKOS_IMPL_MATH_CONSTANT(e,          2.718281828459045235360287471352662498L);
+KOKKOS_IMPL_MATH_CONSTANT(log2e,      1.442695040888963407359924681001892137L);
+KOKKOS_IMPL_MATH_CONSTANT(log10e,     0.434294481903251827651128918916605082L);
+KOKKOS_IMPL_MATH_CONSTANT(pi,         3.141592653589793238462643383279502884L);
+KOKKOS_IMPL_MATH_CONSTANT(inv_pi,     0.318309886183790671537767526745028724L);
+KOKKOS_IMPL_MATH_CONSTANT(inv_sqrtpi, 0.564189583547756286948079451560772586L);
+KOKKOS_IMPL_MATH_CONSTANT(ln2,        0.693147180559945309417232121458176568L);
+KOKKOS_IMPL_MATH_CONSTANT(ln10,       2.302585092994045684017991454684364208L);
+KOKKOS_IMPL_MATH_CONSTANT(sqrt2,      1.414213562373095048801688724209698079L);
+KOKKOS_IMPL_MATH_CONSTANT(sqrt3,      1.732050807568877293527446341505872367L);
+KOKKOS_IMPL_MATH_CONSTANT(inv_sqrt3,  0.577350269189625764509148780501957456L);
+KOKKOS_IMPL_MATH_CONSTANT(egamma,     0.577215664901532860606512090082402431L);
+KOKKOS_IMPL_MATH_CONSTANT(phi,        1.618033988749894848204586834365638118L);
+// clang-format on
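+
+// Usage sketch (editorial note, not upstream Kokkos code): each constant is
+// a variable template restricted to floating-point types, mirroring C++20
+// std::numbers:
+//
+//   constexpr double two_pi = 2.0 * Kokkos::Experimental::pi_v<double>;
+//   static_assert(two_pi > 6.28 && two_pi < 6.29, "sanity check");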
+
+#undef KOKKOS_IMPL_MATH_CONSTANT
+
+}  // namespace Experimental
+}  // namespace Kokkos
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHCONSTANTS
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHCONSTANTS
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_MathematicalFunctions.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_MathematicalFunctions.hpp
new file mode 100644 (file)
index 0000000..6942f84
--- /dev/null
@@ -0,0 +1,412 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_MATHEMATICAL_FUNCTIONS_HPP
+#define KOKKOS_MATHEMATICAL_FUNCTIONS_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHFUNCTIONS
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <cmath>
+#include <cstdlib>
+#include <type_traits>
+
+#ifdef KOKKOS_ENABLE_SYCL
+#include <CL/sycl.hpp>
+#endif
+
+namespace Kokkos {
+
+namespace Impl {
+template <class T, bool = std::is_integral<T>::value>
+struct promote {
+  using type = double;
+};
+template <class T>
+struct promote<T, false> {};
+template <>
+struct promote<long double> {
+  using type = long double;
+};
+template <>
+struct promote<double> {
+  using type = double;
+};
+template <>
+struct promote<float> {
+  using type = float;
+};
+template <class T>
+using promote_t = typename promote<T>::type;
+template <class T, class U,
+          bool = std::is_arithmetic<T>::value&& std::is_arithmetic<U>::value>
+struct promote_2 {
+  using type = decltype(promote_t<T>() + promote_t<U>());
+};
+template <class T, class U>
+struct promote_2<T, U, false> {};
+template <class T, class U>
+using promote_2_t = typename promote_2<T, U>::type;
+}  // namespace Impl
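+
+// Promotion sketch (editorial note, not upstream Kokkos code): integral
+// arguments promote to double and mixed arguments promote like the built-in
+// operator+, so
+//   Impl::promote_t<int>            is double,
+//   Impl::promote_2_t<int, float>   is double (double + float -> double),
+//   Impl::promote_2_t<float, float> is float.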
+
+// NOTE long double overloads are not available on the device
+
+#if defined(KOKKOS_ENABLE_SYCL)
+#define KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE sycl
+#else
+#if (defined(KOKKOS_COMPILER_NVCC) || defined(KOKKOS_COMPILER_NVHPC)) && \
+    defined(__GNUC__) && (__GNUC__ < 6) && !defined(__clang__)
+#define KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE
+#else
+#define KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE std
+#endif
+#endif
+
+#if defined(KOKKOS_ENABLE_DEPRECATED_CODE_3)
+#define KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED( \
+    USING_DECLARATIONS_IN_EXPERIMENTAL_NAMESPACE)                      \
+  USING_DECLARATIONS_IN_EXPERIMENTAL_NAMESPACE
+#else
+#define KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED( \
+    USING_DECLARATIONS_IN_EXPERIMENTAL_NAMESPACE)                      \
+  /* nothing */
+#endif
+
+#define KOKKOS_IMPL_MATH_UNARY_FUNCTION(FUNC)                                 \
+  KOKKOS_INLINE_FUNCTION float FUNC(float x) {                                \
+    using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC;                         \
+    return FUNC(x);                                                           \
+  }                                                                           \
+  KOKKOS_INLINE_FUNCTION double FUNC(double x) {                              \
+    using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC;                         \
+    return FUNC(x);                                                           \
+  }                                                                           \
+  inline long double FUNC(long double x) {                                    \
+    using std::FUNC;                                                          \
+    return FUNC(x);                                                           \
+  }                                                                           \
+  KOKKOS_INLINE_FUNCTION float FUNC##f(float x) {                             \
+    using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC;                         \
+    return FUNC(x);                                                           \
+  }                                                                           \
+  inline long double FUNC##l(long double x) {                                 \
+    using std::FUNC;                                                          \
+    return FUNC(x);                                                           \
+  }                                                                           \
+  template <class T>                                                          \
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral<T>::value, double> \
+  FUNC(T x) {                                                                 \
+    using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC;                         \
+    return FUNC(static_cast<double>(x));                                      \
+  }                                                                           \
+  KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED(              \
+      namespace Experimental {                                                \
+        using ::Kokkos::FUNC;                                                 \
+        using ::Kokkos::FUNC##f;                                              \
+        using ::Kokkos::FUNC##l;                                              \
+      })
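+
+// Expansion sketch (editorial note, not upstream Kokkos code):
+// KOKKOS_IMPL_MATH_UNARY_FUNCTION(sqrt) generates device-callable float and
+// double overloads, host-only long double overloads, the sqrtf/sqrtl
+// spellings, and a double overload for integral arguments, so each of
+//
+//   Kokkos::sqrt(2.0f);  // float
+//   Kokkos::sqrt(2.0);   // double
+//   Kokkos::sqrt(2);     // integral argument, promoted to double
+//
+// resolves to a single consistent overload set.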
+
+// isinf, isnan, and isfinite do not work on Windows with CUDA when called
+// through std::; the compiler warns about calling a host function from a
+// device function, and the runtime test then fails.
+#if defined(_WIN32) && defined(KOKKOS_ENABLE_CUDA)
+#define KOKKOS_IMPL_MATH_UNARY_PREDICATE(FUNC)                              \
+  KOKKOS_INLINE_FUNCTION bool FUNC(float x) { return ::FUNC(x); }           \
+  KOKKOS_INLINE_FUNCTION bool FUNC(double x) { return ::FUNC(x); }          \
+  inline bool FUNC(long double x) {                                         \
+    using std::FUNC;                                                        \
+    return FUNC(x);                                                         \
+  }                                                                         \
+  template <class T>                                                        \
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral<T>::value, bool> \
+  FUNC(T x) {                                                               \
+    return ::FUNC(static_cast<double>(x));                                  \
+  }                                                                         \
+  KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED(            \
+      namespace Experimental { using ::Kokkos::FUNC; })
+#else
+#define KOKKOS_IMPL_MATH_UNARY_PREDICATE(FUNC)                              \
+  KOKKOS_INLINE_FUNCTION bool FUNC(float x) {                               \
+    using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC;                       \
+    return FUNC(x);                                                         \
+  }                                                                         \
+  KOKKOS_INLINE_FUNCTION bool FUNC(double x) {                              \
+    using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC;                       \
+    return FUNC(x);                                                         \
+  }                                                                         \
+  inline bool FUNC(long double x) {                                         \
+    using std::FUNC;                                                        \
+    return FUNC(x);                                                         \
+  }                                                                         \
+  template <class T>                                                        \
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_integral<T>::value, bool> \
+  FUNC(T x) {                                                               \
+    using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC;                       \
+    return FUNC(static_cast<double>(x));                                    \
+  }                                                                         \
+  KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED(            \
+      namespace Experimental { using ::Kokkos::FUNC; })
+#endif
+
+#define KOKKOS_IMPL_MATH_BINARY_FUNCTION(FUNC)                          \
+  KOKKOS_INLINE_FUNCTION float FUNC(float x, float y) {                 \
+    using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC;                   \
+    return FUNC(x, y);                                                  \
+  }                                                                     \
+  KOKKOS_INLINE_FUNCTION double FUNC(double x, double y) {              \
+    using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC;                   \
+    return FUNC(x, y);                                                  \
+  }                                                                     \
+  inline long double FUNC(long double x, long double y) {               \
+    using std::FUNC;                                                    \
+    return FUNC(x, y);                                                  \
+  }                                                                     \
+  KOKKOS_INLINE_FUNCTION float FUNC##f(float x, float y) {              \
+    using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC;                   \
+    return FUNC(x, y);                                                  \
+  }                                                                     \
+  inline long double FUNC##l(long double x, long double y) {            \
+    using std::FUNC;                                                    \
+    return FUNC(x, y);                                                  \
+  }                                                                     \
+  template <class T1, class T2>                                         \
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<                              \
+      std::is_arithmetic<T1>::value && std::is_arithmetic<T2>::value && \
+          !std::is_same<T1, long double>::value &&                      \
+          !std::is_same<T2, long double>::value,                        \
+      Kokkos::Impl::promote_2_t<T1, T2>>                                \
+  FUNC(T1 x, T2 y) {                                                    \
+    using Promoted = Kokkos::Impl::promote_2_t<T1, T2>;                 \
+    using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::FUNC;                   \
+    return FUNC(static_cast<Promoted>(x), static_cast<Promoted>(y));    \
+  }                                                                     \
+  template <class T1, class T2>                                         \
+  inline std::enable_if_t<std::is_arithmetic<T1>::value &&              \
+                              std::is_arithmetic<T2>::value &&          \
+                              (std::is_same<T1, long double>::value ||  \
+                               std::is_same<T2, long double>::value),   \
+                          long double>                                  \
+  FUNC(T1 x, T2 y) {                                                    \
+    using Promoted = Kokkos::Impl::promote_2_t<T1, T2>;                 \
+    static_assert(std::is_same<Promoted, long double>::value, "");      \
+    using std::FUNC;                                                    \
+    return FUNC(static_cast<Promoted>(x), static_cast<Promoted>(y));    \
+  }                                                                     \
+  KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED(        \
+      namespace Experimental {                                          \
+        using ::Kokkos::FUNC;                                           \
+        using ::Kokkos::FUNC##f;                                        \
+        using ::Kokkos::FUNC##l;                                        \
+      })
+// Basic operations
+KOKKOS_INLINE_FUNCTION int abs(int n) {
+  using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::abs;
+  return abs(n);
+}
+KOKKOS_INLINE_FUNCTION long abs(long n) {
+// FIXME_NVHPC ptxas fatal   : unresolved extern function 'labs'
+#ifdef KOKKOS_COMPILER_NVHPC
+  return n > 0 ? n : -n;
+#else
+  using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::abs;
+  return abs(n);
+#endif
+}
+KOKKOS_INLINE_FUNCTION long long abs(long long n) {
+// FIXME_NVHPC ptxas fatal   : unresolved extern function 'labs'
+#ifdef KOKKOS_COMPILER_NVHPC
+  return n > 0 ? n : -n;
+#else
+  using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::abs;
+  return abs(n);
+#endif
+}
+KOKKOS_INLINE_FUNCTION float abs(float x) {
+  using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::abs;
+  return abs(x);
+}
+KOKKOS_INLINE_FUNCTION double abs(double x) {
+  using KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE::abs;
+  return abs(x);
+}
+inline long double abs(long double x) {
+  using std::abs;
+  return abs(x);
+}
+KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED(
+    namespace Experimental { using ::Kokkos::abs; })
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(fabs)
+KOKKOS_IMPL_MATH_BINARY_FUNCTION(fmod)
+KOKKOS_IMPL_MATH_BINARY_FUNCTION(remainder)
+// remquo
+// fma
+KOKKOS_IMPL_MATH_BINARY_FUNCTION(fmax)
+KOKKOS_IMPL_MATH_BINARY_FUNCTION(fmin)
+KOKKOS_IMPL_MATH_BINARY_FUNCTION(fdim)
+#ifndef KOKKOS_ENABLE_SYCL
+KOKKOS_INLINE_FUNCTION float nanf(char const* arg) { return ::nanf(arg); }
+KOKKOS_INLINE_FUNCTION double nan(char const* arg) { return ::nan(arg); }
+#else
+// FIXME_SYCL
+// sycl::nan does not follow the C/C++ standard library and takes an unsigned
+// integer as argument.  The current implementation does not attempt to convert
+// the character string arg into the quiet NaN value.
+KOKKOS_INLINE_FUNCTION float nanf(char const*) { return sycl::nan(0u); }
+KOKKOS_INLINE_FUNCTION double nan(char const*) { return sycl::nan(0ul); }
+#endif
+inline long double nanl(char const* arg) { return ::nanl(arg); }
+KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED(
+    namespace Experimental {
+      using ::Kokkos::nan;
+      using ::Kokkos::nanf;
+      using ::Kokkos::nanl;
+    })
+// Exponential functions
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(exp)
+// FIXME_NVHPC nvc++ has issues with exp2
+#ifndef KOKKOS_COMPILER_NVHPC
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(exp2)
+#else
+KOKKOS_INLINE_FUNCTION float exp2(float val) {
+  constexpr float ln2 = 0.693147180559945309417232121458176568L;
+  return exp(ln2 * val);
+}
+KOKKOS_INLINE_FUNCTION double exp2(double val) {
+  constexpr double ln2 = 0.693147180559945309417232121458176568L;
+  return exp(ln2 * val);
+}
+inline long double exp2(long double val) {
+  constexpr long double ln2 = 0.693147180559945309417232121458176568L;
+  return exp(ln2 * val);
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION double exp2(T val) {
+  constexpr double ln2 = 0.693147180559945309417232121458176568L;
+  return exp(ln2 * static_cast<double>(val));
+}
+#endif
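+// (Editorial note: the NVHPC fallback above relies on the identity
+// exp2(x) = exp(x * ln 2).)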
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(expm1)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(log)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(log10)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(log2)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(log1p)
+// Power functions
+KOKKOS_IMPL_MATH_BINARY_FUNCTION(pow)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(sqrt)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(cbrt)
+KOKKOS_IMPL_MATH_BINARY_FUNCTION(hypot)
+// Trigonometric functions
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(sin)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(cos)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(tan)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(asin)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(acos)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(atan)
+KOKKOS_IMPL_MATH_BINARY_FUNCTION(atan2)
+// Hyperbolic functions
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(sinh)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(cosh)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(tanh)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(asinh)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(acosh)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(atanh)
+// Error and gamma functions
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(erf)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(erfc)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(tgamma)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(lgamma)
+// Nearest integer floating point operations
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(ceil)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(floor)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(trunc)
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(round)
+// lround
+// llround
+// FIXME_SYCL not available as of current SYCL 2020 specification (revision 4)
+#ifndef KOKKOS_ENABLE_SYCL  // FIXME_SYCL
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(nearbyint)
+#endif
+// rint
+// lrint
+// llrint
+// Floating point manipulation functions
+// frexp
+// ldexp
+// modf
+// scalbn
+// scalbln
+// ilogb
+KOKKOS_IMPL_MATH_UNARY_FUNCTION(logb)
+KOKKOS_IMPL_MATH_BINARY_FUNCTION(nextafter)
+// nexttoward
+KOKKOS_IMPL_MATH_BINARY_FUNCTION(copysign)
+// Classification and comparison
+// fpclassify
+KOKKOS_IMPL_MATH_UNARY_PREDICATE(isfinite)
+KOKKOS_IMPL_MATH_UNARY_PREDICATE(isinf)
+KOKKOS_IMPL_MATH_UNARY_PREDICATE(isnan)
+// isnormal
+KOKKOS_IMPL_MATH_UNARY_PREDICATE(signbit)
+// isgreater
+// isgreaterequal
+// isless
+// islessequal
+// islessgreater
+// isunordered
+
+#undef KOKKOS_IMPL_MATH_FUNCTIONS_DEFINED_IF_DEPRECATED_CODE_ENABLED
+#undef KOKKOS_IMPL_MATH_FUNCTIONS_NAMESPACE
+#undef KOKKOS_IMPL_MATH_UNARY_FUNCTION
+#undef KOKKOS_IMPL_MATH_UNARY_PREDICATE
+#undef KOKKOS_IMPL_MATH_BINARY_FUNCTION
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHFUNCTIONS
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHFUNCTIONS
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_MathematicalSpecialFunctions.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_MathematicalSpecialFunctions.hpp
new file mode 100644 (file)
index 0000000..07da1db
--- /dev/null
@@ -0,0 +1,1285 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_MATHEMATICAL_SPECIAL_FUNCTIONS_HPP
+#define KOKKOS_MATHEMATICAL_SPECIAL_FUNCTIONS_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHSPECFUNCTIONS
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <cmath>
+#include <algorithm>
+#include <type_traits>
+#include <Kokkos_MathematicalConstants.hpp>
+#include <Kokkos_MathematicalFunctions.hpp>
+#include <Kokkos_NumericTraits.hpp>
+#include <Kokkos_Complex.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+//! Compute exponential integral E1(x) (x > 0).
+template <class RealType>
+KOKKOS_INLINE_FUNCTION RealType expint1(RealType x) {
+  // This function is a conversion of the corresponding Fortran program in
+  // S. Zhang & J. Jin "Computation of Special Functions" (Wiley, 1996).
+  using Kokkos::exp;
+  using Kokkos::fabs;
+  using Kokkos::log;
+  using Kokkos::pow;
+  using Kokkos::Experimental::epsilon;
+  using Kokkos::Experimental::infinity;
+
+  RealType e1;
+
+  if (x < 0) {
+    e1 = -infinity<RealType>::value;
+  } else if (x == 0.0) {
+    e1 = infinity<RealType>::value;
+  } else if (x <= 1.0) {
+    e1         = 1.0;
+    RealType r = 1.0;
+    for (int k = 1; k <= 25; k++) {
+      RealType k_real = static_cast<RealType>(k);
+      r               = -r * k_real * x / pow(k_real + 1.0, 2.0);
+      e1              = e1 + r;
+      if (fabs(r) <= fabs(e1) * epsilon<RealType>::value) break;
+    }
+    e1 = -0.5772156649015328 - log(x) + x * e1;
+  } else {
+    int m       = 20 + static_cast<int>(80.0 / x);
+    RealType t0 = 0.0;
+    for (int k = m; k >= 1; k--) {
+      RealType k_real = static_cast<RealType>(k);
+      t0              = k_real / (1.0 + k_real / (x + t0));
+    }
+    e1 = exp(-x) * (1.0 / (x + t0));
+  }
+  return e1;
+}
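+
+// Usage sketch (editorial note, not upstream Kokkos code): expint1 is
+// callable from host and device code; the argument is illustrative.
+//
+//   double e1 = Kokkos::Experimental::expint1(1.0);  // ~0.219384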
+
+//! Compute error function erf(z) for z=cmplx(x,y).
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> erf(
+    const Kokkos::complex<RealType>& z) {
+  // This function is a conversion of the corresponding Fortran program written
+  // by D.E. Amos, May,1974. D.E. Amos' revisions of Jan 86 incorporated by
+  // Ken Damrau on 27-Jan-1986 14:37:13
+  //
+  // Reference: NBS HANDBOOK OF MATHEMATICAL FUNCTIONS, AMS 55, By
+  //           M. ABRAMOWITZ AND I.A. STEGUN, December,1955.
+  // Summary:
+//  If x < 0, z is replaced by -z and all computation is done in the right
+//  half plane, except for z inside the circle abs(z)<=2, since
+//  erf(-z)=-erf(z). The regions for computation are divided as follows:
+  //      (1)  abs(z)<=2 - Power series, NBS Handbook, p. 298
+  //      (2)  abs(z)>2 and x>1 - continued fraction, NBS Handbook, p. 298
+  //      (3)  abs(z)>2 and 0<=x<=1 and abs(y)<6 - series, NBS Handbook, p. 299
+  //      (4)  abs(z)>2 and 0<=x<=1 and abs(y)>=6 - asymptotic expansion
+  //  Error condition: abs(z^2) > 670 is a fatal overflow error
+  using Kokkos::cos;
+  using Kokkos::exp;
+  using Kokkos::fabs;
+  using Kokkos::sin;
+  using Kokkos::Experimental::epsilon;
+  using Kokkos::Experimental::infinity;
+
+  using CmplxType = Kokkos::complex<RealType>;
+
+  constexpr auto inf = infinity<RealType>::value;
+  constexpr auto tol = epsilon<RealType>::value;
+
+  const RealType fnorm = 1.12837916709551;
+  const RealType gnorm = 0.564189583547756;
+  const RealType eh    = 0.606530659712633;
+  const RealType ef    = 0.778800783071405;
+  // const RealType tol   = 1.0e-13;
+  constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+
+  CmplxType cans;
+
+  RealType az = Kokkos::abs(z);
+  if (az <= 2.0) {  // Series for abs(z)<=2.0
+    CmplxType cz    = z * z;
+    CmplxType accum = CmplxType(1.0, 0.0);
+    CmplxType term  = accum;
+    RealType ak     = 1.5;
+    for (int i = 1; i <= 35; i++) {
+      term  = term * cz / ak;
+      accum = accum + term;
+      if (Kokkos::abs(term) <= tol) break;
+      ak = ak + 1.0;
+    }
+    cz          = -cz;
+    RealType er = cz.real();
+    RealType ei = cz.imag();
+    accum       = accum * z * fnorm;
+    cz          = exp(er) * CmplxType(cos(ei), sin(ei));
+    cans        = accum * cz;
+  }       // end (az <= 2.0)
+  else {  //(az > 2.0)
+    CmplxType zp = z;
+    if (z.real() < 0.0) zp = -z;
+    CmplxType cz = zp * zp;
+    RealType xp  = zp.real();
+    RealType yp  = zp.imag();
+    if (xp > 1.0) {
+      // continued fraction for erfc(z), abs(Z)>2
+      int n          = static_cast<int>(100.0 / az + 5.0);
+      int fn         = n;
+      CmplxType term = cz;
+      for (int i = 1; i <= n; i++) {
+        RealType fnh = fn - 0.5;
+        term         = cz + (fnh * term) / (fn + term);
+        fn           = fn - 1;
+      }
+      if (Kokkos::abs(cz) > 670.0) return CmplxType(inf, inf);
+      cz              = -cz;
+      RealType er     = cz.real();
+      RealType ei     = cz.imag();
+      cz              = exp(er) * CmplxType(cos(ei), sin(ei));
+      CmplxType accum = zp * gnorm * cz;
+      cans            = 1.0 - accum / term;
+      if (z.real() < 0.0) cans = -cans;
+    }       // end (xp > 1.0)
+    else {  //(xp <= 1.0)
+      if (fabs(yp) <
+          6.0) {  // Series (3) for abs(z)>2 and 0<=xp<=1 and abs(yp)<6
+        RealType s1   = 0.0;
+        RealType s2   = 0.0;
+        RealType x2   = xp * xp;
+        RealType fx2  = 4.0 * x2;
+        RealType tx   = xp + xp;
+        RealType xy   = xp * yp;
+        RealType sxyh = sin(xy);
+        RealType sxy  = sin(xy + xy);
+        RealType cxy  = cos(xy + xy);
+        RealType fn   = 1.0;
+        RealType fnh  = 0.5;
+        RealType ey   = exp(yp);
+        RealType en   = ey;
+        RealType ehn  = eh;
+        RealType un   = ef;
+        RealType vn   = 1.0;
+        for (int i = 1; i <= 50; i++) {
+          RealType ren = 1.0 / en;
+          RealType csh = en + ren;
+          RealType tm  = xp * csh;
+          RealType ssh = en - ren;
+          RealType tmp = fnh * ssh;
+          RealType rn  = tx - tm * cxy + tmp * sxy;
+          RealType ain = tm * sxy + tmp * cxy;
+          RealType cf  = un / (vn + fx2);
+          rn           = cf * rn;
+          ain          = cf * ain;
+          s1           = s1 + rn;
+          s2           = s2 + ain;
+          if ((fabs(rn) + fabs(ain)) < tol * (fabs(s1) + fabs(s2))) break;
+          un  = un * ehn * ef;
+          ehn = ehn * eh;
+          en  = en * ey;
+          vn  = vn + fn + fn + 1.0;
+          fnh = fnh + 0.5;
+          fn  = fn + 1.0;
+        }
+        s1 = s1 + s1;
+        s2 = s2 + s2;
+        if (z.real() == 0.0)
+          s2 = s2 + yp;
+        else {
+          s1 = s1 + sxyh * sxyh / xp;
+          s2 = s2 + sxy / tx;
+        }
+        // Power series for erf(xp), 0<=xp<=1
+        RealType w  = 1.0;
+        RealType ak = 1.5;
+        RealType tm = 1.0;
+        for (int i = 1; i <= 17; i++) {
+          tm = tm * x2 / ak;
+          w  = w + tm;
+          if (tm <= tol) break;
+          ak = ak + 1.0;
+        }
+        RealType ex = exp(-x2);
+        w           = w * xp * fnorm * ex;
+        RealType cf = ex / pi;
+        s1          = cf * s1 + w;
+        s2          = cf * s2;
+        cans        = CmplxType(s1, s2);
+        if (z.real() < 0.0) cans = -cans;
+      }       // end (abs(yp) < 6.0)
+      else {  //(abs(yp)>=6.0)
+        // Asymptotic expansion for 0<=xp<=1 and abs(yp)>=6
+        CmplxType rcz   = 0.5 / cz;
+        CmplxType accum = CmplxType(1.0, 0.0);
+        CmplxType term  = accum;
+        RealType ak     = 1.0;
+        for (int i = 1; i <= 35; i++) {
+          term  = -term * ak * rcz;
+          accum = accum + term;
+          if (Kokkos::abs(term) / Kokkos::abs(accum) <= tol) break;
+          ak = ak + 2.0;
+        }
+        accum       = accum * gnorm / zp;
+        cz          = -cz;
+        RealType er = cz.real();
+        if (fabs(er) > 670.0) return CmplxType(inf, inf);
+        RealType ei = cz.imag();
+        cz          = exp(er) * CmplxType(cos(ei), sin(ei));
+        cans        = 1.0 - accum * cz;
+        if (z.real() < 0.0) cans = -cans;
+      }  // end (abs(yp)>=6.0)
+    }    // end (xp <= 1.0)
+  }      // end (az > 2.0)
+  return cans;
+}
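+
+// Illustrative usage sketch: like every function in this header, the complex
+// error function above is marked KOKKOS_INLINE_FUNCTION and may be called
+// from device kernels. With hypothetical Views 'zin' and 'zout' of
+// Kokkos::complex<double>:
+//
+//   Kokkos::parallel_for(
+//       n, KOKKOS_LAMBDA(const int i) {
+//         zout(i) = Kokkos::Experimental::erf(zin(i));
+//       });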
+
+//! Compute scaled complementary error function erfcx(z)=exp(z^2)*erfc(z)
+//! for z=cmplx(x,y).
+template <class RealType>
+KOKKOS_INLINE_FUNCTION Kokkos::complex<RealType> erfcx(
+    const Kokkos::complex<RealType>& z) {
+  // This function is a conversion of the corresponding Fortran program written
+  // by D.E. Amos, May, 1974. D.E. Amos' revisions of Jan 86 incorporated by
+  // Ken Damrau on 27-Jan-1986 14:37:13
+  //
+  // Reference: NBS HANDBOOK OF MATHEMATICAL FUNCTIONS, AMS 55, by
+  //           M. ABRAMOWITZ AND I.A. STEGUN, December, 1955.
+  // Summary:
+  //  If x < 0, z is replaced by -z and all computation is done in the right
+  //  half plane, except for z inside the circle abs(z)<=2, since
+  //  erfc(-z)=2-erfc(z). The regions for computation are divided as follows:
+  //      (1)  abs(z)<=2 - Power series, NBS Handbook, p. 298
+  //      (2)  abs(z)>2 and x>1 - continued fraction, NBS Handbook, p. 298
+  //      (3)  abs(z)>2 and 0<=x<=1 and abs(y)<6 - series, NBS Handbook, p. 299
+  //      (4)  abs(z)>2 and 0<=x<=1 and abs(y)>=6 - asymptotic expansion
+  // Error condition: abs(z^2) > 670 is a fatal overflow error when x<0
+  using Kokkos::cos;
+  using Kokkos::exp;
+  using Kokkos::fabs;
+  using Kokkos::isinf;
+  using Kokkos::sin;
+  using Kokkos::Experimental::epsilon;
+  using Kokkos::Experimental::infinity;
+
+  using CmplxType = Kokkos::complex<RealType>;
+
+  constexpr auto inf = infinity<RealType>::value;
+  constexpr auto tol = epsilon<RealType>::value;
+
+  const RealType fnorm = 1.12837916709551;  // 2/sqrt(pi)
+  constexpr auto gnorm = Kokkos::Experimental::inv_sqrtpi_v<RealType>;
+  const RealType eh    = 0.606530659712633;  // exp(-1/2)
+  const RealType ef    = 0.778800783071405;  // exp(-1/4)
+  // const RealType tol   = 1.0e-13;
+  constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+
+  CmplxType cans;
+
+  if ((isinf(z.real())) && (z.real() > 0)) {
+    cans = CmplxType(0.0, 0.0);
+    return cans;
+  }
+  if ((isinf(z.real())) && (z.real() < 0)) {
+    cans = CmplxType(inf, inf);
+    return cans;
+  }
+
+  RealType az = Kokkos::abs(z);
+  if (az <= 2.0) {  // Series for abs(z)<=2.0
+    CmplxType cz    = z * z;
+    CmplxType accum = CmplxType(1.0, 0.0);
+    CmplxType term  = accum;
+    RealType ak     = 1.5;
+    for (int i = 1; i <= 35; i++) {
+      term  = term * cz / ak;
+      accum = accum + term;
+      if (Kokkos::abs(term) <= tol) break;
+      ak = ak + 1.0;
+    }
+    cz          = -cz;
+    RealType er = cz.real();
+    RealType ei = cz.imag();
+    accum       = accum * z * fnorm;
+    cz          = exp(er) * CmplxType(cos(ei), sin(ei));
+    cans        = 1.0 / cz - accum;
+  }       // end (az <= 2.0)
+  else {  //(az > 2.0)
+    CmplxType zp = z;
+    if (z.real() < 0.0) zp = -z;
+    CmplxType cz = zp * zp;
+    RealType xp  = zp.real();
+    RealType yp  = zp.imag();
+    if (xp > 1.0) {
+      // continued fraction for erfc(z), abs(z)>2
+      int n          = static_cast<int>(100.0 / az + 5.0);
+      int fn         = n;
+      CmplxType term = cz;
+      for (int i = 1; i <= n; i++) {
+        RealType fnh = fn - 0.5;
+        term         = cz + (fnh * term) / (fn + term);
+        fn           = fn - 1;
+      }
+      cans = zp * gnorm / term;
+      if (z.real() >= 0.0) return cans;
+      if (Kokkos::abs(cz) > 670.0) return CmplxType(inf, inf);
+      cz          = -cz;
+      RealType er = cz.real();
+      RealType ei = cz.imag();
+      cz          = exp(er) * CmplxType(cos(ei), sin(ei));
+      cz          = 1.0 / cz;
+      cans        = cz + cz - cans;
+    }       // end (xp > 1.0)
+    else {  //(xp <= 1.0)
+      if (fabs(yp) <
+          6.0) {  // Series (3) for abs(z)>2 and 0<=xp<=1 and abs(yp)<6
+        RealType s1   = 0.0;
+        RealType s2   = 0.0;
+        RealType x2   = xp * xp;
+        RealType fx2  = 4.0 * x2;
+        RealType tx   = xp + xp;
+        RealType xy   = xp * yp;
+        RealType sxyh = sin(xy);
+        RealType sxy  = sin(xy + xy);
+        RealType cxy  = cos(xy + xy);
+        RealType fn   = 1.0;
+        RealType fnh  = 0.5;
+        RealType ey   = exp(yp);
+        RealType en   = ey;
+        RealType ehn  = eh;
+        RealType un   = ef;
+        RealType vn   = 1.0;
+        for (int i = 1; i <= 50; i++) {
+          RealType ren = 1.0 / en;
+          RealType csh = en + ren;
+          RealType tm  = xp * csh;
+          RealType ssh = en - ren;
+          RealType tmp = fnh * ssh;
+          RealType rn  = tx - tm * cxy + tmp * sxy;
+          RealType ain = tm * sxy + tmp * cxy;
+          RealType cf  = un / (vn + fx2);
+          rn           = cf * rn;
+          ain          = cf * ain;
+          s1           = s1 + rn;
+          s2           = s2 + ain;
+          if ((fabs(rn) + fabs(ain)) < tol * (fabs(s1) + fabs(s2))) break;
+          un  = un * ehn * ef;
+          ehn = ehn * eh;
+          en  = en * ey;
+          vn  = vn + fn + fn + 1.0;
+          fnh = fnh + 0.5;
+          fn  = fn + 1.0;
+        }
+        s1 = s1 + s1;
+        s2 = s2 + s2;
+        if (z.real() == 0.0)
+          s2 = s2 + yp;
+        else {
+          s1 = s1 + sxyh * sxyh / xp;
+          s2 = s2 + sxy / tx;
+        }
+        // Power series for erf(xp), 0<=xp<=1
+        RealType w  = 1.0;
+        RealType ak = 1.5;
+        RealType tm = 1.0;
+        for (int i = 1; i <= 17; i++) {
+          tm = tm * x2 / ak;
+          w  = w + tm;
+          if (tm <= tol) break;
+          ak = ak + 1.0;
+        }
+        RealType ex   = exp(-x2);
+        w             = w * xp * fnorm * ex;
+        CmplxType rcz = CmplxType(cxy, sxy);
+        RealType y2   = yp * yp;
+        cz            = exp(x2 - y2) * rcz;
+        rcz           = exp(-y2) * rcz;
+        if (z.real() >= 0.0)
+          cans = cz * (1.0 - w) - rcz * CmplxType(s1, s2) / pi;
+        else
+          cans = cz * (1.0 + w) + rcz * CmplxType(s1, s2) / pi;
+      }       // end (abs(yp) < 6.0)
+      else {  //(abs(yp)>=6.0)
+        // Asymptotic expansion for 0<=xp<=1 and abs(yp)>=6
+        CmplxType rcz   = 0.5 / cz;
+        CmplxType accum = CmplxType(1.0, 0.0);
+        CmplxType term  = accum;
+        RealType ak     = 1.0;
+        for (int i = 1; i <= 35; i++) {
+          term  = -term * ak * rcz;
+          accum = accum + term;
+          if (Kokkos::abs(term) / Kokkos::abs(accum) <= tol) break;
+          ak = ak + 2.0;
+        }
+        accum = accum * gnorm / zp;
+        if (z.real() < 0.0) accum = -accum;
+        cans = accum;
+      }  // end (abs(yp)>=6.0)
+    }    // end (xp <= 1.0)
+  }      // end (az > 2.0)
+  return cans;
+}
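+
+// Why the scaled form matters (illustrative): for large real arguments
+// erfc(x) underflows like exp(-x^2); e.g. erfc(30) ~ 3e-393 is not
+// representable in double precision, while
+// erfcx(30) = exp(900)*erfc(30) ~ 1/(sqrt(pi)*30) ~ 0.0188 is.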
+
+//! Compute scaled complementary error function erfcx(x)=exp(x^2)*erfc(x)
+//! for real x
+template <class RealType>
+KOKKOS_INLINE_FUNCTION RealType erfcx(RealType x) {
+  using CmplxType = Kokkos::complex<RealType>;
+  // Note: using erfcx(complex) for now
+  // TODO: replace with an implementation of erfcx(real)
+  CmplxType zin  = CmplxType(x, 0.0);
+  CmplxType zout = erfcx(zin);
+  return zout.real();
+}
+
+//! Compute Bessel function J0(z) of the first kind of order zero
+//! for a complex argument
+template <class CmplxType, class RealType, class IntType>
+KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_j0(const CmplxType& z,
+                                               const RealType& joint_val = 25,
+                                               const IntType& bw_start   = 70) {
+  // This function is converted and modified from the corresponding Fortran
+  // program CJYNB in S. Zhang & J. Jin "Computation of Special Functions"
+  // (Wiley, 1996).
+  // Input :  z         --- Complex argument
+  //         joint_val --- Joint point of abs(z) separating small and large
+  //                       argument regions
+  //         bw_start  --- Starting point for backward recurrence
+  // Output:  cbj0      --- J0(z)
+  using Kokkos::fabs;
+  using Kokkos::pow;
+
+  CmplxType cbj0;
+  constexpr auto pi    = Kokkos::Experimental::pi_v<RealType>;
+  const RealType a[12] = {
+      -0.703125e-01,           0.112152099609375e+00,   -0.5725014209747314e+00,
+      0.6074042001273483e+01,  -0.1100171402692467e+03, 0.3038090510922384e+04,
+      -0.1188384262567832e+06, 0.6252951493434797e+07,  -0.4259392165047669e+09,
+      0.3646840080706556e+11,  -0.3833534661393944e+13, 0.4854014686852901e+15};
+  const RealType b[12] = {0.732421875e-01,        -0.2271080017089844e+00,
+                          0.1727727502584457e+01, -0.2438052969955606e+02,
+                          0.5513358961220206e+03, -0.1825775547429318e+05,
+                          0.8328593040162893e+06, -0.5006958953198893e+08,
+                          0.3836255180230433e+10, -0.3649010818849833e+12,
+                          0.4218971570284096e+14, -0.5827244631566907e+16};
+
+  RealType r2p = 2.0 / pi;
+  RealType a0  = Kokkos::abs(z);
+  RealType y0  = fabs(z.imag());
+  CmplxType z1 = z;
+
+  if (a0 < 1e-100) {  // Treat z=0 as a special case
+    cbj0 = CmplxType(1.0, 0.0);
+  } else {
+    if (z.real() < 0.0) z1 = -z;
+    if (a0 <= joint_val) {  // Using backward recurrence for |z|<=joint_val
+                            // (default:25)
+      CmplxType cbs = CmplxType(0.0, 0.0);
+      CmplxType csu = CmplxType(0.0, 0.0);
+      CmplxType csv = CmplxType(0.0, 0.0);
+      CmplxType cf2 = CmplxType(0.0, 0.0);
+      CmplxType cf1 = CmplxType(1e-100, 0.0);
+      CmplxType cf, cs0;
+      for (int k = bw_start; k >= 0; k--) {  // Backward recurrence (default:
+                                             // 70)
+        cf                    = 2.0 * (k + 1.0) / z * cf1 - cf2;
+        RealType tmp_exponent = static_cast<RealType>(k / 2);
+        if (k == 0) cbj0 = cf;
+        if ((k == 2 * (k / 2)) && (k != 0)) {
+          if (y0 <= 1.0)
+            cbs = cbs + 2.0 * cf;
+          else
+            cbs = cbs + pow(-1.0, tmp_exponent) * 2.0 * cf;
+          csu = csu + pow(-1.0, tmp_exponent) * cf / k;
+        } else if (k > 1) {
+          csv = csv + pow(-1.0, tmp_exponent) * k / (k * k - 1.0) * cf;
+        }
+        cf2 = cf1;
+        cf1 = cf;
+      }
+      if (y0 <= 1.0)
+        cs0 = cbs + cf;
+      else
+        cs0 = (cbs + cf) / Kokkos::cos(z);
+      cbj0 = cbj0 / cs0;
+    } else {  // Using asymptotic expansion (5.2.5) for |z|>joint_val
+              // (default:25)
+      CmplxType ct1 = z1 - 0.25 * pi;
+      CmplxType cp0 = CmplxType(1.0, 0.0);
+      for (int k = 1; k <= 12; k++) {  // Calculate (5.2.9)
+        cp0 = cp0 + a[k - 1] * Kokkos::pow(z1, -2.0 * k);
+      }
+      CmplxType cq0 = -0.125 / z1;
+      for (int k = 1; k <= 12; k++) {  // Calculate (5.2.10)
+        cq0 = cq0 + b[k - 1] * Kokkos::pow(z1, -2.0 * k - 1);
+      }
+      CmplxType cu = Kokkos::sqrt(r2p / z1);
+      cbj0         = cu * (cp0 * Kokkos::cos(ct1) - cq0 * Kokkos::sin(ct1));
+    }
+  }
+  return cbj0;
+}
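+
+// Note on the small-|z| branch above (illustrative): it is Miller's backward
+// recurrence. The three-term relation J_{k-1}(z) = (2k/z)*J_k(z) - J_{k+1}(z)
+// is iterated downward from an arbitrary seed, and cs0 rescales the seed via
+// the normalization J0(z) + 2*sum_{m>=1} J_{2m}(z) = 1 (or its cos(z)-scaled
+// analogue, used when |Im(z)| > 1 for numerical stability).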
+
+//! Compute Bessel function Y0(z) of the second kind of order zero
+//! for a complex argument
+template <class CmplxType, class RealType, class IntType>
+KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_y0(const CmplxType& z,
+                                               const RealType& joint_val = 25,
+                                               const IntType& bw_start   = 70) {
+  // This function is converted and modified from the corresponding Fortran
+  // program CJYNB in S. Zhang & J. Jin "Computation of Special Functions"
+  // (Wiley, 1996).
+  //    Input :  z         --- Complex argument
+  //             joint_val --- Joint point of abs(z) separating small and large
+  //                           argument regions
+  //             bw_start  --- Starting point for backward recurrence
+  //    Output:  cby0      --- Y0(z)
+  using Kokkos::fabs;
+  using Kokkos::pow;
+  using Kokkos::Experimental::infinity;
+
+  constexpr auto inf = infinity<RealType>::value;
+
+  CmplxType cby0, cbj0;
+  constexpr auto pi    = Kokkos::Experimental::pi_v<RealType>;
+  constexpr auto el    = Kokkos::Experimental::egamma_v<RealType>;
+  const RealType a[12] = {
+      -0.703125e-01,           0.112152099609375e+00,   -0.5725014209747314e+00,
+      0.6074042001273483e+01,  -0.1100171402692467e+03, 0.3038090510922384e+04,
+      -0.1188384262567832e+06, 0.6252951493434797e+07,  -0.4259392165047669e+09,
+      0.3646840080706556e+11,  -0.3833534661393944e+13, 0.4854014686852901e+15};
+  const RealType b[12] = {0.732421875e-01,        -0.2271080017089844e+00,
+                          0.1727727502584457e+01, -0.2438052969955606e+02,
+                          0.5513358961220206e+03, -0.1825775547429318e+05,
+                          0.8328593040162893e+06, -0.5006958953198893e+08,
+                          0.3836255180230433e+10, -0.3649010818849833e+12,
+                          0.4218971570284096e+14, -0.5827244631566907e+16};
+
+  RealType r2p = 2.0 / pi;
+  RealType a0  = Kokkos::abs(z);
+  RealType y0  = fabs(z.imag());
+  CmplxType ci = CmplxType(0.0, 1.0);
+  CmplxType z1 = z;
+
+  if (a0 < 1e-100) {  // Treat z=0 as a special case
+    cby0 = -CmplxType(inf, 0.0);
+  } else {
+    if (z.real() < 0.0) z1 = -z;
+    if (a0 <= joint_val) {  // Using backward recurrence for |z|<=joint_val
+                            // (default:25)
+      CmplxType cbs = CmplxType(0.0, 0.0);
+      CmplxType csu = CmplxType(0.0, 0.0);
+      CmplxType csv = CmplxType(0.0, 0.0);
+      CmplxType cf2 = CmplxType(0.0, 0.0);
+      CmplxType cf1 = CmplxType(1e-100, 0.0);
+      CmplxType cf, cs0, ce;
+      for (int k = bw_start; k >= 0; k--) {  // Backward recurrence (default:
+                                             // 70)
+        cf                    = 2.0 * (k + 1.0) / z * cf1 - cf2;
+        RealType tmp_exponent = static_cast<RealType>(k / 2);
+        if (k == 0) cbj0 = cf;
+        if ((k == 2 * (k / 2)) && (k != 0)) {
+          if (y0 <= 1.0)
+            cbs = cbs + 2.0 * cf;
+          else
+            cbs = cbs + pow(-1.0, tmp_exponent) * 2.0 * cf;
+          csu = csu + pow(-1.0, tmp_exponent) * cf / k;
+        } else if (k > 1) {
+          csv = csv + pow(-1.0, tmp_exponent) * k / (k * k - 1.0) * cf;
+        }
+        cf2 = cf1;
+        cf1 = cf;
+      }
+      if (y0 <= 1.0)
+        cs0 = cbs + cf;
+      else
+        cs0 = (cbs + cf) / Kokkos::cos(z);
+      cbj0 = cbj0 / cs0;
+      ce   = Kokkos::log(z / 2.0) + el;
+      cby0 = r2p * (ce * cbj0 - 4.0 * csu / cs0);
+    } else {  // Using asymptotic expansion (5.2.6) for |z|>joint_val
+              // (default:25)
+      CmplxType ct1 = z1 - 0.25 * pi;
+      CmplxType cp0 = CmplxType(1.0, 0.0);
+      for (int k = 1; k <= 12; k++) {  // Calculate (5.2.9)
+        cp0 = cp0 + a[k - 1] * Kokkos::pow(z1, -2.0 * k);
+      }
+      CmplxType cq0 = -0.125 / z1;
+      for (int k = 1; k <= 12; k++) {  // Calculate (5.2.10)
+        cq0 = cq0 + b[k - 1] * Kokkos::pow(z1, -2.0 * k - 1);
+      }
+      CmplxType cu = Kokkos::sqrt(r2p / z1);
+      cbj0         = cu * (cp0 * Kokkos::cos(ct1) - cq0 * Kokkos::sin(ct1));
+      cby0         = cu * (cp0 * Kokkos::sin(ct1) + cq0 * Kokkos::cos(ct1));
+
+      if (z.real() < 0.0) {  // Apply (5.4.2)
+        if (z.imag() < 0.0) cby0 = cby0 - 2.0 * ci * cbj0;
+        if (z.imag() >= 0.0) cby0 = cby0 + 2.0 * ci * cbj0;
+      }
+    }
+  }
+  return cby0;
+}
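+
+// Note (illustrative): the small-|z| branch obtains Y0 from the connection
+// formula Y0(z) = (2/pi)*[(ln(z/2) + gamma)*J0(z)
+// - 2*sum_{m>=1} (-1)^m * J_{2m}(z)/m], with gamma the Euler-Mascheroni
+// constant (el); ce*cbj0 - 4.0*csu/cs0 evaluates exactly this bracket.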
+
+//! Compute Bessel function J1(z) of the first kind of order one
+//! for a complex argument
+template <class CmplxType, class RealType, class IntType>
+KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_j1(const CmplxType& z,
+                                               const RealType& joint_val = 25,
+                                               const IntType& bw_start   = 70) {
+  // This function is converted and modified from the corresponding Fortran
+  // program CJYNB in S. Zhang & J. Jin "Computation of Special Functions"
+  // (Wiley, 1996).
+  //    Input :  z         --- Complex argument
+  //             joint_val --- Joint point of abs(z) separating small and large
+  //                           argument regions
+  //             bw_start  --- Starting point for backward recurrence
+  //    Output:  cbj1      --- J1(z)
+  using Kokkos::fabs;
+  using Kokkos::pow;
+
+  CmplxType cbj1;
+  constexpr auto pi     = Kokkos::Experimental::pi_v<RealType>;
+  const RealType a1[12] = {0.1171875e+00,          -0.144195556640625e+00,
+                           0.6765925884246826e+00, -0.6883914268109947e+01,
+                           0.1215978918765359e+03, -0.3302272294480852e+04,
+                           0.1276412726461746e+06, -0.6656367718817688e+07,
+                           0.4502786003050393e+09, -0.3833857520742790e+11,
+                           0.4011838599133198e+13, -0.5060568503314727e+15};
+  const RealType b1[12] = {
+      -0.1025390625e+00,       0.2775764465332031e+00,  -0.1993531733751297e+01,
+      0.2724882731126854e+02,  -0.6038440767050702e+03, 0.1971837591223663e+05,
+      -0.8902978767070678e+06, 0.5310411010968522e+08,  -0.4043620325107754e+10,
+      0.3827011346598605e+12,  -0.4406481417852278e+14, 0.6065091351222699e+16};
+
+  RealType r2p = 2.0 / pi;
+  RealType a0  = Kokkos::abs(z);
+  RealType y0  = fabs(z.imag());
+  CmplxType z1 = z;
+
+  if (a0 < 1e-100) {  // Treat z=0 as a special case
+    cbj1 = CmplxType(0.0, 0.0);
+  } else {
+    if (z.real() < 0.0) z1 = -z;
+    if (a0 <= joint_val) {  // Using backward recurrence for |z|<=joint_val
+                            // (default:25)
+      CmplxType cbs = CmplxType(0.0, 0.0);
+      CmplxType csu = CmplxType(0.0, 0.0);
+      CmplxType csv = CmplxType(0.0, 0.0);
+      CmplxType cf2 = CmplxType(0.0, 0.0);
+      CmplxType cf1 = CmplxType(1e-100, 0.0);
+      CmplxType cf, cs0;
+      for (int k = bw_start; k >= 0; k--) {  // Backward recurrence (default:
+                                             // 70)
+        cf                    = 2.0 * (k + 1.0) / z * cf1 - cf2;
+        RealType tmp_exponent = static_cast<RealType>(k / 2);
+        if (k == 1) cbj1 = cf;
+        if ((k == 2 * (k / 2)) && (k != 0)) {
+          if (y0 <= 1.0)
+            cbs = cbs + 2.0 * cf;
+          else
+            cbs = cbs + pow(-1.0, tmp_exponent) * 2.0 * cf;
+          csu = csu + pow(-1.0, tmp_exponent) * cf / k;
+        } else if (k > 1) {
+          csv = csv + pow(-1.0, tmp_exponent) * k / (k * k - 1.0) * cf;
+        }
+        cf2 = cf1;
+        cf1 = cf;
+      }
+      if (y0 <= 1.0)
+        cs0 = cbs + cf;
+      else
+        cs0 = (cbs + cf) / Kokkos::cos(z);
+      cbj1 = cbj1 / cs0;
+    } else {  // Using asymptotic expansion (5.2.5) for |z|>joint_val
+              // (default:25)
+      CmplxType ct2 = z1 - 0.75 * pi;
+      CmplxType cp1 = CmplxType(1.0, 0.0);
+      for (int k = 1; k <= 12; k++) {  // Calculate (5.2.11)
+        cp1 = cp1 + a1[k - 1] * Kokkos::pow(z1, -2.0 * k);
+      }
+      CmplxType cq1 = 0.375 / z1;
+      for (int k = 1; k <= 12; k++) {  // Calculate (5.2.12)
+        cq1 = cq1 + b1[k - 1] * Kokkos::pow(z1, -2.0 * k - 1);
+      }
+      CmplxType cu = Kokkos::sqrt(r2p / z1);
+      cbj1         = cu * (cp1 * Kokkos::cos(ct2) - cq1 * Kokkos::sin(ct2));
+
+      if (z.real() < 0.0) {  // Apply (5.4.2)
+        cbj1 = -cbj1;
+      }
+    }
+  }
+  return cbj1;
+}
+
+//! Compute Bessel function Y1(z) of the second kind of order one
+//! for a complex argument
+template <class CmplxType, class RealType, class IntType>
+KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_y1(const CmplxType& z,
+                                               const RealType& joint_val = 25,
+                                               const IntType& bw_start   = 70) {
+  // This function is converted and modified from the corresponding Fortran
+  // program CJYNB in S. Zhang & J. Jin "Computation of Special Functions"
+  // (Wiley, 1996).
+  //    Input :  z         --- Complex argument
+  //             joint_val --- Joint point of abs(z) separating small and large
+  //                           argument regions
+  //             bw_start  --- Starting point for backward recurrence
+  //    Output:  cby1      --- Y1(z)
+  using Kokkos::fabs;
+  using Kokkos::pow;
+  using Kokkos::Experimental::infinity;
+
+  constexpr auto inf = infinity<RealType>::value;
+
+  CmplxType cby1, cbj0, cbj1, cby0;
+  constexpr auto pi     = Kokkos::Experimental::pi_v<RealType>;
+  constexpr auto el     = Kokkos::Experimental::egamma_v<RealType>;
+  const RealType a1[12] = {0.1171875e+00,          -0.144195556640625e+00,
+                           0.6765925884246826e+00, -0.6883914268109947e+01,
+                           0.1215978918765359e+03, -0.3302272294480852e+04,
+                           0.1276412726461746e+06, -0.6656367718817688e+07,
+                           0.4502786003050393e+09, -0.3833857520742790e+11,
+                           0.4011838599133198e+13, -0.5060568503314727e+15};
+  const RealType b1[12] = {
+      -0.1025390625e+00,       0.2775764465332031e+00,  -0.1993531733751297e+01,
+      0.2724882731126854e+02,  -0.6038440767050702e+03, 0.1971837591223663e+05,
+      -0.8902978767070678e+06, 0.5310411010968522e+08,  -0.4043620325107754e+10,
+      0.3827011346598605e+12,  -0.4406481417852278e+14, 0.6065091351222699e+16};
+
+  RealType r2p = 2.0 / pi;
+  RealType a0  = Kokkos::abs(z);
+  RealType y0  = fabs(z.imag());
+  CmplxType ci = CmplxType(0.0, 1.0);
+  CmplxType z1 = z;
+
+  if (a0 < 1e-100) {  // Treat z=0 as a special case
+    cby1 = -CmplxType(inf, 0.0);
+  } else {
+    if (z.real() < 0.0) z1 = -z;
+    if (a0 <= joint_val) {  // Using backward recurrence for |z|<=joint_val
+                            // (default:25)
+      CmplxType cbs = CmplxType(0.0, 0.0);
+      CmplxType csu = CmplxType(0.0, 0.0);
+      CmplxType csv = CmplxType(0.0, 0.0);
+      CmplxType cf2 = CmplxType(0.0, 0.0);
+      CmplxType cf1 = CmplxType(1e-100, 0.0);
+      CmplxType cf, cs0, ce;
+      for (int k = bw_start; k >= 0; k--) {  // Backward recurrence (default:
+                                             // 70)
+        cf                    = 2.0 * (k + 1.0) / z * cf1 - cf2;
+        RealType tmp_exponent = static_cast<RealType>(k / 2);
+        if (k == 1) cbj1 = cf;
+        if (k == 0) cbj0 = cf;
+        if ((k == 2 * (k / 2)) && (k != 0)) {
+          if (y0 <= 1.0)
+            cbs = cbs + 2.0 * cf;
+          else
+            cbs = cbs + pow(-1.0, tmp_exponent) * 2.0 * cf;
+          csu = csu + pow(-1.0, tmp_exponent) * cf / k;
+        } else if (k > 1) {
+          csv = csv + pow(-1.0, tmp_exponent) * k / (k * k - 1.0) * cf;
+        }
+        cf2 = cf1;
+        cf1 = cf;
+      }
+      if (y0 <= 1.0)
+        cs0 = cbs + cf;
+      else
+        cs0 = (cbs + cf) / Kokkos::cos(z);
+      cbj0 = cbj0 / cs0;
+      ce   = Kokkos::log(z / 2.0) + el;
+      cby0 = r2p * (ce * cbj0 - 4.0 * csu / cs0);
+      cbj1 = cbj1 / cs0;
+      cby1 = (cbj1 * cby0 - 2.0 / (pi * z)) / cbj0;
+    } else {  // Using asymptotic expansion (5.2.5) for |z|>joint_val
+              // (default:25)
+      CmplxType ct2 = z1 - 0.75 * pi;
+      CmplxType cp1 = CmplxType(1.0, 0.0);
+      for (int k = 1; k <= 12; k++) {  // Calculate (5.2.11)
+        cp1 = cp1 + a1[k - 1] * Kokkos::pow(z1, -2.0 * k);
+      }
+      CmplxType cq1 = 0.375 / z1;
+      for (int k = 1; k <= 12; k++) {  // Calculate (5.2.12)
+        cq1 = cq1 + b1[k - 1] * Kokkos::pow(z1, -2.0 * k - 1);
+      }
+      CmplxType cu = Kokkos::sqrt(r2p / z1);
+      cbj1         = cu * (cp1 * Kokkos::cos(ct2) - cq1 * Kokkos::sin(ct2));
+      cby1         = cu * (cp1 * Kokkos::sin(ct2) + cq1 * Kokkos::cos(ct2));
+
+      if (z.real() < 0.0) {  // Apply (5.4.2)
+        if (z.imag() < 0.0) cby1 = -(cby1 - 2.0 * ci * cbj1);
+        if (z.imag() >= 0.0) cby1 = -(cby1 + 2.0 * ci * cbj1);
+      }
+    }
+  }
+  return cby1;
+}
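+
+// Note (illustrative): in the small-|z| branch Y1 follows from Y0 via the
+// Wronskian J1(z)*Y0(z) - J0(z)*Y1(z) = 2/(pi*z), rearranged to
+// Y1 = (J1*Y0 - 2/(pi*z))/J0, which is the expression assigned to cby1.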
+
+//! Compute modified Bessel function I0(z) of the first kind of order zero
+//! for a complex argument
+template <class CmplxType, class RealType, class IntType>
+KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_i0(const CmplxType& z,
+                                               const RealType& joint_val = 25,
+                                               const IntType& bw_start   = 70) {
+  // This function is converted and modified from the corresponding Fortran
+  // programs CIKNB and CIK01 in S. Zhang & J. Jin "Computation of Special
+  // Functions" (Wiley, 1996).
+  //    Input :  z         --- Complex argument
+  //             joint_val --- Joint point of abs(z) separating small and large
+  //                           argument regions
+  //             bw_start  --- Starting point for backward recurrence
+  //    Output:  cbi0      --- I0(z)
+  CmplxType cbi0;
+  constexpr auto pi    = Kokkos::Experimental::pi_v<RealType>;
+  const RealType a[12] = {0.125,
+                          7.03125e-2,
+                          7.32421875e-2,
+                          1.1215209960938e-1,
+                          2.2710800170898e-1,
+                          5.7250142097473e-1,
+                          1.7277275025845e0,
+                          6.0740420012735e0,
+                          2.4380529699556e1,
+                          1.1001714026925e2,
+                          5.5133589612202e2,
+                          3.0380905109224e3};
+
+  RealType a0  = Kokkos::abs(z);
+  CmplxType z1 = z;
+
+  if (a0 < 1e-100) {  // Treat z=0 as a special case
+    cbi0 = CmplxType(1.0, 0.0);
+  } else {
+    if (z.real() < 0.0) z1 = -z;
+    if (a0 <= joint_val) {  // Using backward recurrence for |z|<=joint_val
+                            // (default:25)
+      CmplxType cbs = CmplxType(0.0, 0.0);
+      // CmplxType csk0 = CmplxType(0.0,0.0);
+      CmplxType cf0 = CmplxType(0.0, 0.0);
+      CmplxType cf1 = CmplxType(1e-100, 0.0);
+      CmplxType cf, cs0;
+      for (int k = bw_start; k >= 0; k--) {  // Backward recurrence (default:
+                                             // 70)
+        cf = 2.0 * (k + 1.0) * cf1 / z1 + cf0;
+        if (k == 0) cbi0 = cf;
+        // if ((k == 2*(k/2)) && (k != 0)) {
+        //  csk0 = csk0+4.0*cf/static_cast<RealType>(k);
+        //}
+        cbs = cbs + 2.0 * cf;
+        cf0 = cf1;
+        cf1 = cf;
+      }
+      cs0  = Kokkos::exp(z1) / (cbs - cf);
+      cbi0 = cbi0 * cs0;
+    } else {  // Using asymptotic expansion (6.2.1) for |z|>joint_val
+              // (default:25)
+      CmplxType ca = Kokkos::exp(z1) / Kokkos::sqrt(2.0 * pi * z1);
+      cbi0         = CmplxType(1.0, 0.0);
+      CmplxType zr = 1.0 / z1;
+      for (int k = 1; k <= 12; k++) {
+        cbi0 = cbi0 + a[k - 1] * Kokkos::pow(zr, 1.0 * k);
+      }
+      cbi0 = ca * cbi0;
+    }
+  }
+  return cbi0;
+}
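+
+// Note (illustrative): the small-|z| branch is again backward recurrence,
+// normalized here with exp(z) = I0(z) + 2*sum_{k>=1} I_k(z). Since cbs
+// accumulates 2*cf for every k down to 0, cbs - cf removes the doubled k=0
+// term, and cs0 = exp(z1)/(cbs - cf) rescales the seed to the true I0.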
+
+//! Compute modified Bessel function K0(z) of the second kind of order zero
+//! for a complex argument
+template <class CmplxType, class RealType, class IntType>
+KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_k0(const CmplxType& z,
+                                               const RealType& joint_val = 9,
+                                               const IntType& bw_start   = 30) {
+  // This function is converted and modified from the corresponding Fortran
+  // programs CIKNB and CIK01 in S. Zhang & J. Jin "Computation of Special
+  // Functions" (Wiley, 1996).
+  //    Input :  z         --- Complex argument
+  //             joint_val --- Joint point of abs(z) separating small and large
+  //                           argument regions
+  //             bw_start  --- Starting point for backward recurrence
+  //    Output:  cbk0      --- K0(z)
+  using Kokkos::pow;
+  using Kokkos::Experimental::infinity;
+
+  constexpr auto inf = infinity<RealType>::value;
+
+  CmplxType cbk0, cbi0;
+  constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+  constexpr auto el = Kokkos::Experimental::egamma_v<RealType>;
+
+  RealType a0  = Kokkos::abs(z);
+  CmplxType ci = CmplxType(0.0, 1.0);
+  CmplxType z1 = z;
+
+  if (a0 < 1e-100) {  // Treat z=0 as a special case
+    cbk0 = CmplxType(inf, 0.0);
+  } else {
+    if (z.real() < 0.0) z1 = -z;
+    if (a0 <= joint_val) {  // Using backward recurrence for |z|<=joint_val
+                            // (default:9)
+      CmplxType cbs  = CmplxType(0.0, 0.0);
+      CmplxType csk0 = CmplxType(0.0, 0.0);
+      CmplxType cf0  = CmplxType(0.0, 0.0);
+      CmplxType cf1  = CmplxType(1e-100, 0.0);
+      CmplxType cf, cs0;
+      for (int k = bw_start; k >= 0; k--) {  // Backward recurrence (default:
+                                             // 30)
+        cf = 2.0 * (k + 1.0) * cf1 / z1 + cf0;
+        if (k == 0) cbi0 = cf;
+        if ((k == 2 * (k / 2)) && (k != 0)) {
+          csk0 = csk0 + 4.0 * cf / static_cast<RealType>(k);
+        }
+        cbs = cbs + 2.0 * cf;
+        cf0 = cf1;
+        cf1 = cf;
+      }
+      cs0  = Kokkos::exp(z1) / (cbs - cf);
+      cbi0 = cbi0 * cs0;
+      cbk0 = -(Kokkos::log(0.5 * z1) + el) * cbi0 + cs0 * csk0;
+    } else {  // Using asymptotic expansion (6.2.2) for |z|>joint_val
+              // (default:9)
+      CmplxType ca0  = Kokkos::sqrt(pi / (2.0 * z1)) * Kokkos::exp(-z1);
+      CmplxType cbkl = CmplxType(1.0, 0.0);
+      CmplxType cr   = CmplxType(1.0, 0.0);
+      for (int k = 1; k <= 30; k++) {
+        cr   = 0.125 * cr * (0.0 - pow(2.0 * k - 1.0, 2.0)) / (k * z1);
+        cbkl = cbkl + cr;
+      }
+      cbk0 = ca0 * cbkl;
+    }
+    if (z.real() < 0.0) {  // Apply (6.4.4)
+      if (z.imag() < 0.0)
+        cbk0 = cbk0 + ci * pi * cyl_bessel_i0<CmplxType, RealType, IntType>(z);
+      if (z.imag() >= 0.0)
+        cbk0 = cbk0 - ci * pi * cyl_bessel_i0<CmplxType, RealType, IntType>(z);
+    }
+  }
+  return cbk0;
+}
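+
+// Note (illustrative): the small-|z| branch evaluates the series
+// K0(z) = -(ln(z/2) + gamma)*I0(z) + 2*sum_{m>=1} I_{2m}(z)/m, with csk0
+// accumulating 4*I_k/k over even k (i.e. 2*I_{2m}/m) before rescaling by cs0.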
+
+//! Compute modified Bessel function I1(z) of the first kind of order one
+//! for a complex argument
+template <class CmplxType, class RealType, class IntType>
+KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_i1(const CmplxType& z,
+                                               const RealType& joint_val = 25,
+                                               const IntType& bw_start   = 70) {
+  // This function is converted and modified from the corresponding Fortran
+  // programs CIKNB and CIK01 in S. Zhang & J. Jin "Computation of Special
+  // Functions" (Wiley, 1996).
+  //    Input :  z         --- Complex argument
+  //             joint_val --- Joint point of abs(z) separating small and large
+  //                           argument regions
+  //             bw_start  --- Starting point for backward recurrence
+  //    Output:  cbi1      --- I1(z)
+  CmplxType cbi1;
+  constexpr auto pi    = Kokkos::Experimental::pi_v<RealType>;
+  const RealType b[12] = {-0.375,
+                          -1.171875e-1,
+                          -1.025390625e-1,
+                          -1.4419555664063e-1,
+                          -2.7757644653320e-1,
+                          -6.7659258842468e-1,
+                          -1.9935317337513,
+                          -6.8839142681099,
+                          -2.7248827311269e1,
+                          -1.2159789187654e2,
+                          -6.0384407670507e2,
+                          -3.3022722944809e3};
+
+  RealType a0  = Kokkos::abs(z);
+  CmplxType z1 = z;
+
+  if (a0 < 1e-100) {  // Treat z=0 as a special case
+    cbi1 = CmplxType(0.0, 0.0);
+  } else {
+    if (z.real() < 0.0) z1 = -z;
+    if (a0 <= joint_val) {  // Using backward recurrence for |z|<=joint_val
+                            // (default:25)
+      CmplxType cbs = CmplxType(0.0, 0.0);
+      // CmplxType csk0 = CmplxType(0.0,0.0);
+      CmplxType cf0 = CmplxType(0.0, 0.0);
+      CmplxType cf1 = CmplxType(1e-100, 0.0);
+      CmplxType cf, cs0;
+      for (int k = bw_start; k >= 0; k--) {  // Backward recurrence (default:
+                                             // 70)
+        cf = 2.0 * (k + 1.0) * cf1 / z1 + cf0;
+        if (k == 1) cbi1 = cf;
+        // if ((k == 2*(k/2)) && (k != 0)) {
+        //  csk0 = csk0+4.0*cf/static_cast<RealType>(k);
+        //}
+        cbs = cbs + 2.0 * cf;
+        cf0 = cf1;
+        cf1 = cf;
+      }
+      cs0  = Kokkos::exp(z1) / (cbs - cf);
+      cbi1 = cbi1 * cs0;
+    } else {  // Using asymptotic expansion (6.2.1) for |z|>joint_val
+              // (default:25)
+      CmplxType ca = Kokkos::exp(z1) / Kokkos::sqrt(2.0 * pi * z1);
+      cbi1         = CmplxType(1.0, 0.0);
+      CmplxType zr = 1.0 / z1;
+      for (int k = 1; k <= 12; k++) {
+        cbi1 = cbi1 + b[k - 1] * Kokkos::pow(zr, 1.0 * k);
+      }
+      cbi1 = ca * cbi1;
+    }
+    if (z.real() < 0.0) {  // Apply (6.4.4)
+      cbi1 = -cbi1;
+    }
+  }
+  return cbi1;
+}
+
+//! Compute modified Bessel function K1(z) of the second kind of order one
+//! for a complex argument
+template <class CmplxType, class RealType, class IntType>
+KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_k1(const CmplxType& z,
+                                               const RealType& joint_val = 9,
+                                               const IntType& bw_start   = 30) {
+  // This function is converted and modified from the corresponding Fortran
+  // programs CIKNB and CIK01 in S. Zhang & J. Jin "Computation of Special
+  // Functions" (Wiley, 1996).
+  //    Input :  z         --- Complex argument
+  //             joint_val --- Joint point of abs(z) separating small and large
+  //                           argument regions
+  //             bw_start  --- Starting point for backward recurrence
+  //    Output:  cbk1      --- K1(z)
+  using Kokkos::pow;
+  using Kokkos::Experimental::infinity;
+
+  constexpr auto inf = infinity<RealType>::value;
+
+  CmplxType cbk0, cbi0, cbk1, cbi1;
+  constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+  constexpr auto el = Kokkos::Experimental::egamma_v<RealType>;
+
+  RealType a0  = Kokkos::abs(z);
+  CmplxType ci = CmplxType(0.0, 1.0);
+  CmplxType z1 = z;
+
+  if (a0 < 1e-100) {  // Treat z=0 as a special case
+    cbk1 = CmplxType(inf, 0.0);
+  } else {
+    if (z.real() < 0.0) z1 = -z;
+    if (a0 <= joint_val) {  // Using backward recurrence for |z|<=joint_val
+                            // (default:9)
+      CmplxType cbs  = CmplxType(0.0, 0.0);
+      CmplxType csk0 = CmplxType(0.0, 0.0);
+      CmplxType cf0  = CmplxType(0.0, 0.0);
+      CmplxType cf1  = CmplxType(1e-100, 0.0);
+      CmplxType cf, cs0;
+      for (int k = bw_start; k >= 0; k--) {  // Backward recurrence (default:
+                                             // 30)
+        cf = 2.0 * (k + 1.0) * cf1 / z1 + cf0;
+        if (k == 1) cbi1 = cf;
+        if (k == 0) cbi0 = cf;
+        if ((k == 2 * (k / 2)) && (k != 0)) {
+          csk0 = csk0 + 4.0 * cf / static_cast<RealType>(k);
+        }
+        cbs = cbs + 2.0 * cf;
+        cf0 = cf1;
+        cf1 = cf;
+      }
+      cs0  = Kokkos::exp(z1) / (cbs - cf);
+      cbi0 = cbi0 * cs0;
+      cbi1 = cbi1 * cs0;
+      cbk0 = -(Kokkos::log(0.5 * z1) + el) * cbi0 + cs0 * csk0;
+      cbk1 = (1.0 / z1 - cbi1 * cbk0) / cbi0;
+    } else {  // Using asymptotic expansion (6.2.2) for |z|>joint_val
+              // (default:9)
+      CmplxType ca0  = Kokkos::sqrt(pi / (2.0 * z1)) * Kokkos::exp(-z1);
+      CmplxType cbkl = CmplxType(1.0, 0.0);
+      CmplxType cr   = CmplxType(1.0, 0.0);
+      for (int k = 1; k <= 30; k++) {
+        cr   = 0.125 * cr * (4.0 - pow(2.0 * k - 1.0, 2.0)) / (k * z1);
+        cbkl = cbkl + cr;
+      }
+      cbk1 = ca0 * cbkl;
+    }
+    if (z.real() < 0.0) {  // Apply (6.4.4)
+      if (z.imag() < 0.0)
+        cbk1 = -cbk1 - ci * pi * cyl_bessel_i1<CmplxType, RealType, IntType>(z);
+      if (z.imag() >= 0.0)
+        cbk1 = -cbk1 + ci * pi * cyl_bessel_i1<CmplxType, RealType, IntType>(z);
+    }
+  }
+  return cbk1;
+}
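+
+// Note (illustrative): K1 follows from the modified-Bessel Wronskian
+// I0(z)*K1(z) + I1(z)*K0(z) = 1/z, rearranged to K1 = (1/z - I1*K0)/I0,
+// which is the expression assigned to cbk1.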
+
+//! Compute Hankel function H10(z) of the first kind of order zero
+//! for a complex argument
+template <class CmplxType>
+KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_h10(const CmplxType& z) {
+  // This function is converted and modified from the corresponding Fortran
+  // program CH12N in S. Zhang & J. Jin "Computation of Special Functions"
+  // (Wiley, 1996).
+  using RealType = typename CmplxType::value_type;
+  using Kokkos::Experimental::infinity;
+
+  constexpr auto inf = infinity<RealType>::value;
+
+  CmplxType ch10, cbk0, cbj0, cby0;
+  constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+  CmplxType ci      = CmplxType(0.0, 1.0);
+
+  if ((z.real() == 0.0) && (z.imag() == 0.0)) {
+    ch10 = CmplxType(1.0, -inf);
+  } else if (z.imag() <= 0.0) {
+    cbj0 = cyl_bessel_j0<CmplxType, RealType, int>(z);
+    cby0 = cyl_bessel_y0<CmplxType, RealType, int>(z);
+    ch10 = cbj0 + ci * cby0;
+  } else {  //(z.imag() > 0.0)
+    cbk0 = cyl_bessel_k0<CmplxType, RealType, int>(-ci * z, 18.0, 70);
+    ch10 = 2.0 / (pi * ci) * cbk0;
+  }
+
+  return ch10;
+}
+
+//! Compute Hankel function H11(z) of the first kind of order one
+//! for a complex argument
+template <class CmplxType>
+KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_h11(const CmplxType& z) {
+  // This function is converted and modified from the corresponding Fortran
+  // program CH12N in S. Zhang & J. Jin "Computation of Special Functions"
+  // (Wiley, 1996).
+  using RealType = typename CmplxType::value_type;
+  using Kokkos::Experimental::infinity;
+
+  constexpr auto inf = infinity<RealType>::value;
+
+  CmplxType ch11, cbk1, cbj1, cby1;
+  constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+  CmplxType ci      = CmplxType(0.0, 1.0);
+
+  if ((z.real() == 0.0) && (z.imag() == 0.0)) {
+    ch11 = CmplxType(0.0, -inf);
+  } else if (z.imag() <= 0.0) {
+    cbj1 = cyl_bessel_j1<CmplxType, RealType, int>(z);
+    cby1 = cyl_bessel_y1<CmplxType, RealType, int>(z);
+    ch11 = cbj1 + ci * cby1;
+  } else {  //(z.imag() > 0.0)
+    cbk1 = cyl_bessel_k1<CmplxType, RealType, int>(-ci * z, 18.0, 70);
+    ch11 = -2.0 / pi * cbk1;
+  }
+
+  return ch11;
+}
+
+//! Compute Hankel function H20(z) of the second kind of order zero
+//! for a complex argument
+template <class CmplxType>
+KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_h20(const CmplxType& z) {
+  // This function is converted and modified from the corresponding Fortran
+  // program CH12N in S. Zhang & J. Jin "Computation of Special Functions"
+  // (Wiley, 1996).
+  using RealType = typename CmplxType::value_type;
+  using Kokkos::Experimental::infinity;
+
+  constexpr auto inf = infinity<RealType>::value;
+
+  CmplxType ch20, cbk0, cbj0, cby0;
+  constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+  CmplxType ci      = CmplxType(0.0, 1.0);
+
+  if ((z.real() == 0.0) && (z.imag() == 0.0)) {
+    ch20 = CmplxType(1.0, inf);
+  } else if (z.imag() >= 0.0) {
+    cbj0 = cyl_bessel_j0<CmplxType, RealType, int>(z);
+    cby0 = cyl_bessel_y0<CmplxType, RealType, int>(z);
+    ch20 = cbj0 - ci * cby0;
+  } else {  //(z.imag() < 0.0)
+    cbk0 = cyl_bessel_k0<CmplxType, RealType, int>(ci * z, 18.0, 70);
+    ch20 = 2.0 / pi * ci * cbk0;
+  }
+
+  return ch20;
+}
+
+//! Compute Hankel function H21(z) of the second kind of order one
+//! for a complex argument
+template <class CmplxType>
+KOKKOS_INLINE_FUNCTION CmplxType cyl_bessel_h21(const CmplxType& z) {
+  // This function is converted and modified from the corresponding Fortran
+  // program CH12N in S. Zhang & J. Jin "Computation of Special Functions"
+  // (Wiley, 1996).
+  using RealType = typename CmplxType::value_type;
+  using Kokkos::Experimental::infinity;
+
+  constexpr auto inf = infinity<RealType>::value;
+
+  CmplxType ch21, cbk1, cbj1, cby1;
+  constexpr auto pi = Kokkos::Experimental::pi_v<RealType>;
+  CmplxType ci      = CmplxType(0.0, 1.0);
+
+  if ((z.real() == 0.0) && (z.imag() == 0.0)) {
+    ch21 = CmplxType(0.0, inf);
+  } else if (z.imag() >= 0.0) {
+    cbj1 = cyl_bessel_j1<CmplxType, RealType, int>(z);
+    cby1 = cyl_bessel_y1<CmplxType, RealType, int>(z);
+    ch21 = cbj1 - ci * cby1;
+  } else {  //(z.imag() < 0.0)
+    cbk1 = cyl_bessel_k1<CmplxType, RealType, int>(ci * z, 18.0, 70);
+    ch21 = -2.0 / pi * cbk1;
+  }
+
+  return ch21;
+}
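+
+// Note (illustrative): the four Hankel routines above combine the identities
+// H^(1)_n(z) = J_n(z) + i*Y_n(z) and H^(2)_n(z) = J_n(z) - i*Y_n(z) in the
+// half plane where that combination is well conditioned, and switch to
+// K-based relations such as H^(1)_0(z) = (2/(pi*i))*K0(-i*z) in the other
+// half plane, where J and Y grow exponentially and the combination would
+// suffer heavy cancellation.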
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHSPECFUNCTIONS
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_MATHSPECFUNCTIONS
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_MemoryPool.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_MemoryPool.hpp
new file mode 100644 (file)
index 0000000..30afa31
--- /dev/null
@@ -0,0 +1,828 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_MEMORYPOOL_HPP
+#define KOKKOS_MEMORYPOOL_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Parallel.hpp>
+#include <Kokkos_Atomic.hpp>
+#include <impl/Kokkos_ConcurrentBitset.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+
+#include <iostream>
+
+namespace Kokkos {
+namespace Impl {
+/* Report violation of size constraints:
+ *   min_block_alloc_size <= max_block_alloc_size
+ *   max_block_alloc_size <= min_superblock_size
+ *   min_superblock_size  <= max_superblock_size
+ *   min_superblock_size  <= min_total_alloc_size
+ *   min_superblock_size  <= min_block_alloc_size *
+ *                           max_block_per_superblock
+ */
+void memory_pool_bounds_verification(size_t min_block_alloc_size,
+                                     size_t max_block_alloc_size,
+                                     size_t min_superblock_size,
+                                     size_t max_superblock_size,
+                                     size_t max_block_per_superblock,
+                                     size_t min_total_alloc_size);
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+namespace Impl {
+
+void _print_memory_pool_state(std::ostream &s, uint32_t const *sb_state_ptr,
+                              int32_t sb_count, uint32_t sb_size_lg2,
+                              uint32_t sb_state_size, uint32_t state_shift,
+                              uint32_t state_used_mask);
+
+}  // end namespace Impl
+
+template <typename DeviceType>
+class MemoryPool {
+ private:
+  using CB = Kokkos::Impl::concurrent_bitset;
+
+  enum : uint32_t { bits_per_int_lg2 = CB::bits_per_int_lg2 };
+  enum : uint32_t { state_shift = CB::state_shift };
+  enum : uint32_t { state_used_mask = CB::state_used_mask };
+  enum : uint32_t { state_header_mask = CB::state_header_mask };
+  enum : uint32_t { max_bit_count_lg2 = CB::max_bit_count_lg2 };
+  enum : uint32_t { max_bit_count = CB::max_bit_count };
+
+  enum : uint32_t { HINT_PER_BLOCK_SIZE = 2 };
+
+  /*  Each superblock has a concurrent bitset state
+   *  which is an array of uint32_t integers.
+   *    [ { block_count_lg2  : state_shift bits
+   *      , used_block_count : ( 32 - state_shift ) bits
+   *      }
+   *    , { block allocation bit set }* ]
+   *
+   *  As superblocks are assigned (allocated) to a block size
+   *  and released (deallocated) back to empty the superblock state
+   *  is concurrently updated.
+   */
+
+  /*  Mapping between block_size <-> block_state
+   *
+   *  block_state = ( m_sb_size_lg2 - block_size_lg2 ) << state_shift
+   *  block_size  = m_sb_size_lg2 - ( block_state >> state_shift )
+   *
+   *  Thus A_block_size < B_block_size  <=>  A_block_state > B_block_state
+   */
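+
+  /*  Worked example (illustrative): with a 1 MiB superblock
+   *  (m_sb_size_lg2 = 20) serving 64-byte blocks (block_size_lg2 = 6),
+   *  block_state = (20 - 6) << state_shift encodes block_count_lg2 = 14,
+   *  i.e. 2^14 = 16384 blocks per superblock; the smaller block size
+   *  yields the numerically larger state value, as stated above.
+   */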
+
+  using base_memory_space = typename DeviceType::memory_space;
+
+  enum {
+    accessible = Kokkos::Impl::MemorySpaceAccess<Kokkos::HostSpace,
+                                                 base_memory_space>::accessible
+  };
+
+  using Tracker = Kokkos::Impl::SharedAllocationTracker;
+  using Record  = Kokkos::Impl::SharedAllocationRecord<base_memory_space>;
+
+  Tracker m_tracker;
+  uint32_t *m_sb_state_array;
+  uint32_t m_sb_state_size;
+  uint32_t m_sb_size_lg2;
+  uint32_t m_max_block_size_lg2;
+  uint32_t m_min_block_size_lg2;
+  int32_t m_sb_count;
+  int32_t m_hint_offset;  // Offset to K * #block_size array of hints
+  int32_t m_data_offset;  // Offset to 0th superblock data
+  int32_t m_unused_padding;
+
+ public:
+  using memory_space = typename DeviceType::memory_space;
+
+  /**\brief  The maximum size of a superblock and block */
+  enum : uint32_t { max_superblock_size = 1LU << 31 /* 2 gigabytes */ };
+  enum : uint32_t { max_block_per_superblock = max_bit_count };
+
+  //--------------------------------------------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  bool operator==(MemoryPool const &other) const {
+    return m_sb_state_array == other.m_sb_state_array;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  size_t capacity() const noexcept {
+    return size_t(m_sb_count) << m_sb_size_lg2;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  size_t min_block_size() const noexcept {
+    return (1LU << m_min_block_size_lg2);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  size_t max_block_size() const noexcept {
+    return (1LU << m_max_block_size_lg2);
+  }
+
+  struct usage_statistics {
+    size_t capacity_bytes;        ///<  Capacity in bytes
+    size_t superblock_bytes;      ///<  Superblock size in bytes
+    size_t max_block_bytes;       ///<  Maximum block size in bytes
+    size_t min_block_bytes;       ///<  Minimum block size in bytes
+    size_t capacity_superblocks;  ///<  Number of superblocks
+    size_t consumed_superblocks;  ///<  Superblocks assigned to allocations
+    size_t consumed_blocks;       ///<  Number of allocations
+    size_t consumed_bytes;        ///<  Bytes allocated
+    size_t reserved_blocks;  ///<  Unallocated blocks in assigned superblocks
+    size_t reserved_bytes;   ///<  Unallocated bytes in assigned superblocks
+  };
+
+  void get_usage_statistics(usage_statistics &stats) const {
+    Kokkos::HostSpace host;
+
+    const size_t alloc_size = m_hint_offset * sizeof(uint32_t);
+
+    uint32_t *const sb_state_array =
+        accessible ? m_sb_state_array : (uint32_t *)host.allocate(alloc_size);
+
+    if (!accessible) {
+      Kokkos::Impl::DeepCopy<Kokkos::HostSpace, base_memory_space>(
+          sb_state_array, m_sb_state_array, alloc_size);
+      Kokkos::fence(
+          "MemoryPool::get_usage_statistics(): fence after copying state "
+          "array to HostSpace");
+    }
+
+    stats.superblock_bytes     = (1LU << m_sb_size_lg2);
+    stats.max_block_bytes      = (1LU << m_max_block_size_lg2);
+    stats.min_block_bytes      = (1LU << m_min_block_size_lg2);
+    stats.capacity_bytes       = stats.superblock_bytes * m_sb_count;
+    stats.capacity_superblocks = m_sb_count;
+    stats.consumed_superblocks = 0;
+    stats.consumed_blocks      = 0;
+    stats.consumed_bytes       = 0;
+    stats.reserved_blocks      = 0;
+    stats.reserved_bytes       = 0;
+
+    const uint32_t *sb_state_ptr = sb_state_array;
+
+    for (int32_t i = 0; i < m_sb_count; ++i, sb_state_ptr += m_sb_state_size) {
+      const uint32_t block_count_lg2 = (*sb_state_ptr) >> state_shift;
+
+      if (block_count_lg2) {
+        const uint32_t block_count    = 1u << block_count_lg2;
+        const uint32_t block_size_lg2 = m_sb_size_lg2 - block_count_lg2;
+        const uint32_t block_size     = 1u << block_size_lg2;
+        const uint32_t block_used     = (*sb_state_ptr) & state_used_mask;
+
+        stats.consumed_superblocks++;
+        stats.consumed_blocks += block_used;
+        stats.consumed_bytes += block_used * block_size;
+        stats.reserved_blocks += block_count - block_used;
+        stats.reserved_bytes += (block_count - block_used) * block_size;
+      }
+    }
+
+    if (!accessible) {
+      host.deallocate(sb_state_array, alloc_size);
+    }
+  }
+
+  void print_state(std::ostream &s) const {
+    Kokkos::HostSpace host;
+
+    const size_t alloc_size = m_hint_offset * sizeof(uint32_t);
+
+    uint32_t *const sb_state_array =
+        accessible ? m_sb_state_array : (uint32_t *)host.allocate(alloc_size);
+
+    if (!accessible) {
+      Kokkos::Impl::DeepCopy<Kokkos::HostSpace, base_memory_space>(
+          sb_state_array, m_sb_state_array, alloc_size);
+      Kokkos::fence(
+          "MemoryPool::print_state(): fence after copying state array to "
+          "HostSpace");
+    }
+
+    Impl::_print_memory_pool_state(s, sb_state_array, m_sb_count, m_sb_size_lg2,
+                                   m_sb_state_size, state_shift,
+                                   state_used_mask);
+
+    if (!accessible) {
+      host.deallocate(sb_state_array, alloc_size);
+    }
+  }
+
+  //--------------------------------------------------------------------------
+
+  KOKKOS_DEFAULTED_FUNCTION MemoryPool(MemoryPool &&)      = default;
+  KOKKOS_DEFAULTED_FUNCTION MemoryPool(const MemoryPool &) = default;
+  KOKKOS_DEFAULTED_FUNCTION MemoryPool &operator=(MemoryPool &&) = default;
+  KOKKOS_DEFAULTED_FUNCTION MemoryPool &operator=(const MemoryPool &) = default;
+
+  KOKKOS_INLINE_FUNCTION MemoryPool()
+      : m_tracker(),
+        m_sb_state_array(nullptr),
+        m_sb_state_size(0),
+        m_sb_size_lg2(0),
+        m_max_block_size_lg2(0),
+        m_min_block_size_lg2(0),
+        m_sb_count(0),
+        m_hint_offset(0),
+        m_data_offset(0),
+        m_unused_padding(0) {}
+
+  /**\brief  Allocate a memory pool from 'memspace'.
+   *
+   *  The memory pool will have at least 'min_total_alloc_size' bytes
+   *  of memory to allocate divided among superblocks of at least
+   *  'min_superblock_size' bytes.  A single allocation must fit
+   *  within a single superblock, so 'min_superblock_size' must be
+   *  at least as large as the maximum single allocation.
+   *  Both 'min_total_alloc_size' and 'min_superblock_size'
+   *  are rounded up to the smallest power-of-two value that
+   *  contains the corresponding sizes.
+   *  Individual allocations will always consume a block of memory that
+   *  is also a power-of-two.  These roundings are made to enable
+   *  significant runtime performance improvements.
+   */
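+  /*  Usage sketch (illustrative): a pool backed by the default execution
+   *  space's memory with at least 64 MiB capacity and default block sizing:
+   *
+   *    using device_type = Kokkos::DefaultExecutionSpace::device_type;
+   *    Kokkos::MemoryPool<device_type> pool(
+   *        device_type::memory_space(), size_t(1) << 26);
+   */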
+  MemoryPool(const base_memory_space &memspace,
+             const size_t min_total_alloc_size, size_t min_block_alloc_size = 0,
+             size_t max_block_alloc_size = 0, size_t min_superblock_size = 0)
+      : m_tracker(),
+        m_sb_state_array(nullptr),
+        m_sb_state_size(0),
+        m_sb_size_lg2(0),
+        m_max_block_size_lg2(0),
+        m_min_block_size_lg2(0),
+        m_sb_count(0),
+        m_hint_offset(0),
+        m_data_offset(0),
+        m_unused_padding(0) {
+    const uint32_t int_align_lg2               = 3; /* align as int[8] */
+    const uint32_t int_align_mask              = (1u << int_align_lg2) - 1;
+    const uint32_t default_min_block_size      = 1u << 6;  /* 64 bytes */
+    const uint32_t default_max_block_size      = 1u << 12; /* 4k bytes */
+    const uint32_t default_min_superblock_size = 1u << 20; /* 1M bytes */
+
+    //--------------------------------------------------
+    // Default block and superblock sizes:
+
+    if (0 == min_block_alloc_size) {
+      // Default all sizes:
+
+      min_superblock_size =
+          std::min(size_t(default_min_superblock_size), min_total_alloc_size);
+
+      min_block_alloc_size =
+          std::min(size_t(default_min_block_size), min_superblock_size);
+
+      max_block_alloc_size =
+          std::min(size_t(default_max_block_size), min_superblock_size);
+    } else if (0 == min_superblock_size) {
+      // Choose superblock size as minimum of:
+      //   max_block_per_superblock * min_block_size
+      //   max_superblock_size
+      //   min_total_alloc_size
+
+      const size_t max_superblock =
+          min_block_alloc_size * max_block_per_superblock;
+
+      min_superblock_size =
+          std::min(max_superblock,
+                   std::min(size_t(max_superblock_size), min_total_alloc_size));
+    }
+
+    if (0 == max_block_alloc_size) {
+      max_block_alloc_size = min_superblock_size;
+    }
+
+    //--------------------------------------------------
+
+    /* Enforce size constraints:
+     *   min_block_alloc_size <= max_block_alloc_size
+     *   max_block_alloc_size <= min_superblock_size
+     *   min_superblock_size  <= max_superblock_size
+     *   min_superblock_size  <= min_total_alloc_size
+     *   min_superblock_size  <= min_block_alloc_size *
+     *                           max_block_per_superblock
+     */
+
+    Kokkos::Impl::memory_pool_bounds_verification(
+        min_block_alloc_size, max_block_alloc_size, min_superblock_size,
+        max_superblock_size, max_block_per_superblock, min_total_alloc_size);
+
+    //--------------------------------------------------
+    // Block and superblock size is power of two:
+    // Maximum value is 'max_superblock_size'
+
+    m_min_block_size_lg2 =
+        Kokkos::Impl::integral_power_of_two_that_contains(min_block_alloc_size);
+
+    m_max_block_size_lg2 =
+        Kokkos::Impl::integral_power_of_two_that_contains(max_block_alloc_size);
+
+    m_sb_size_lg2 =
+        Kokkos::Impl::integral_power_of_two_that_contains(min_superblock_size);
+
+    {
+      // The superblock count is the smallest number of superblocks
+      // whose combined size can hold min_total_alloc_size.
+
+      const uint64_t sb_size_mask = (1LU << m_sb_size_lg2) - 1;
+
+      m_sb_count = (min_total_alloc_size + sb_size_mask) >> m_sb_size_lg2;
+    }
+
+    {
+      // Any superblock can be assigned to the smallest block size,
+      // so size the block bitset for the maximum number of blocks.
+
+      const uint32_t max_block_count_lg2 = m_sb_size_lg2 - m_min_block_size_lg2;
+
+      m_sb_state_size =
+          (CB::buffer_bound_lg2(max_block_count_lg2) + int_align_mask) &
+          ~int_align_mask;
+    }
+
+    // Array of all superblock states
+
+    const size_t all_sb_state_size =
+        (m_sb_count * m_sb_state_size + int_align_mask) & ~int_align_mask;
+
+    // Number of block sizes
+
+    const int32_t number_block_sizes =
+        1 + m_max_block_size_lg2 - m_min_block_size_lg2;
+
+    // Array length for possible block sizes
+    // Hint array is one uint32_t per block size
+
+    const int32_t block_size_array_size =
+        (number_block_sizes + int_align_mask) & ~int_align_mask;
+
+    m_hint_offset = all_sb_state_size;
+    m_data_offset = m_hint_offset + block_size_array_size * HINT_PER_BLOCK_SIZE;
+
+    // Allocation:
+
+    const size_t header_size = m_data_offset * sizeof(uint32_t);
+    const size_t alloc_size =
+        header_size + (size_t(m_sb_count) << m_sb_size_lg2);
+
+    Record *rec = Record::allocate(memspace, "Kokkos::MemoryPool", alloc_size);
+
+    m_tracker.assign_allocated_record_to_uninitialized(rec);
+
+    m_sb_state_array = (uint32_t *)rec->data();
+
+    Kokkos::HostSpace host;
+
+    uint32_t *const sb_state_array =
+        accessible ? m_sb_state_array : (uint32_t *)host.allocate(header_size);
+
+    for (int32_t i = 0; i < m_data_offset; ++i) sb_state_array[i] = 0;
+
+    // Initial assignment of empty superblocks to block sizes:
+
+    for (int32_t i = 0; i < number_block_sizes; ++i) {
+      const uint32_t block_size_lg2  = i + m_min_block_size_lg2;
+      const uint32_t block_count_lg2 = m_sb_size_lg2 - block_size_lg2;
+      const uint32_t block_state     = block_count_lg2 << state_shift;
+      const uint32_t hint_begin      = m_hint_offset + i * HINT_PER_BLOCK_SIZE;
+
+      // for block size index 'i':
+      //   sb_id_hint  = sb_state_array[ hint_begin ];
+      //   sb_id_begin = sb_state_array[ hint_begin + 1 ];
+
+      const int32_t jbeg = (i * m_sb_count) / number_block_sizes;
+      const int32_t jend = ((i + 1) * m_sb_count) / number_block_sizes;
+
+      sb_state_array[hint_begin]     = uint32_t(jbeg);
+      sb_state_array[hint_begin + 1] = uint32_t(jbeg);
+
+      for (int32_t j = jbeg; j < jend; ++j) {
+        sb_state_array[j * m_sb_state_size] = block_state;
+      }
+    }
+
+    // Write out initialized state:
+
+    if (!accessible) {
+      Kokkos::Impl::DeepCopy<base_memory_space, Kokkos::HostSpace>(
+          m_sb_state_array, sb_state_array, header_size);
+      Kokkos::fence(
+          "MemoryPool::MemoryPool(): fence after copying state array from "
+          "HostSpace");
+
+      host.deallocate(sb_state_array, header_size);
+    } else {
+      Kokkos::memory_fence();
+    }
+  }
+
+  //--------------------------------------------------------------------------
+
+ private:
+  /* Given a size 'n' get the block size in which it can be allocated.
+   * Restrict lower bound to minimum block size.
+   */
+  KOKKOS_FORCEINLINE_FUNCTION
+  uint32_t get_block_size_lg2(uint32_t n) const noexcept {
+    const unsigned i = Kokkos::Impl::integral_power_of_two_that_contains(n);
+
+    return i < m_min_block_size_lg2 ? m_min_block_size_lg2 : i;
+  }
+
+ public:
+  /* Return 0 for invalid block size */
+  KOKKOS_INLINE_FUNCTION
+  uint32_t allocate_block_size(uint64_t alloc_size) const noexcept {
+    return alloc_size <= (uint64_t(1) << m_max_block_size_lg2)
+               ? (uint32_t(1) << get_block_size_lg2(uint32_t(alloc_size)))
+               : 0;
+  }
+
+  //--------------------------------------------------------------------------
+  /**\brief  Allocate a block of memory that is at least 'alloc_size'
+   *
+   *  The block of memory is aligned to the minimum block size,
+   *  which is currently 64 bytes and will never be less than 32 bytes.
+   *
+   *  If concurrent allocations and deallocations are taking place
+   *  then a single allocation attempt may fail due to lack of available space.
+   *  The allocation attempt will try up to 'attempt_limit' times.
+   */
+  KOKKOS_FUNCTION
+  void *allocate(size_t alloc_size, int32_t attempt_limit = 1) const noexcept {
+    if ((size_t(1) << m_max_block_size_lg2) < alloc_size) {
+      Kokkos::abort(
+          "Kokkos MemoryPool allocation request exceeded specified maximum "
+          "allocation size");
+    }
+
+    if (0 == alloc_size) return nullptr;
+
+    void *p = nullptr;
+
+    const uint32_t block_size_lg2 = get_block_size_lg2(alloc_size);
+
+    // Allocation will fit within a superblock
+    // that has block sizes ( 1 << block_size_lg2 )
+
+    const uint32_t block_count_lg2 = m_sb_size_lg2 - block_size_lg2;
+    const uint32_t block_state     = block_count_lg2 << state_shift;
+    const uint32_t block_count     = 1u << block_count_lg2;
+
+    // Superblock hints for this block size:
+    //   hint_sb_id_ptr[0] is the dynamically changing hint
+    //   hint_sb_id_ptr[1] is the static start point
+
+    volatile uint32_t *const hint_sb_id_ptr =
+        m_sb_state_array      /* memory pool state array */
+        + m_hint_offset       /* offset to hint portion of array */
+        + HINT_PER_BLOCK_SIZE /* number of hints per block size */
+              * (block_size_lg2 - m_min_block_size_lg2); /* block size id */
+
+    const int32_t sb_id_begin = int32_t(hint_sb_id_ptr[1]);
+
+    // Query the fast clock register 'tic' to pseudo-randomize
+    // the guess for which block within a superblock should
+    // be claimed.  If it is not available then a search occurs.
+#if defined(KOKKOS_ENABLE_SYCL) && !defined(KOKKOS_ARCH_INTEL_GPU)
+    const uint32_t block_id_hint = alloc_size;
+#else
+    const uint32_t block_id_hint =
+        (uint32_t)(Kokkos::Impl::clock_tic()
+#ifdef __CUDA_ARCH__  // FIXME_CUDA
+                   // Spread out potentially concurrent access
+                   // by threads within a warp or thread block.
+                   + (threadIdx.x + blockDim.x * threadIdx.y)
+#endif
+        );
+#endif
+
+    // expected state of superblock for allocation
+    uint32_t sb_state = block_state;
+
+    int32_t sb_id = -1;
+
+    volatile uint32_t *sb_state_array = nullptr;
+
+    while (attempt_limit) {
+      int32_t hint_sb_id = -1;
+
+      if (sb_id < 0) {
+        // No superblock specified, try the hint for this block size
+
+        sb_id = hint_sb_id = int32_t(*hint_sb_id_ptr);
+
+        sb_state_array = m_sb_state_array + (sb_id * m_sb_state_size);
+      }
+
+      // Require:
+      //   0 <= sb_id
+      //   sb_state_array == m_sb_state_array + m_sb_state_size * sb_id
+
+      if (sb_state == (state_header_mask & *sb_state_array)) {
+        // This superblock state is as expected, for the moment.
+        // Attempt to claim a bit.  The attempt updates the state,
+        // so we have already made sure the state header is as expected.
+
+        const uint32_t count_lg2 = sb_state >> state_shift;
+        const uint32_t mask      = (1u << count_lg2) - 1;
+
+        const Kokkos::pair<int, int> result = CB::acquire_bounded_lg2(
+            sb_state_array, count_lg2, block_id_hint & mask, sb_state);
+
+        // If result.first < 0 then the acquire failed because the
+        // superblock was either full or in the wrong state.
+        // Could be wrong state if a deallocation raced the
+        // superblock to empty before the acquire could succeed.
+
+        if (0 <= result.first) {  // acquired a bit
+
+          const uint32_t size_lg2 = m_sb_size_lg2 - count_lg2;
+
+          // Set the allocated block pointer
+
+          p = ((char *)(m_sb_state_array + m_data_offset)) +
+              (uint64_t(sb_id) << m_sb_size_lg2)       // superblock memory
+              + (uint64_t(result.first) << size_lg2);  // block memory
+
+          break;  // Success
+        }
+      }
+      //------------------------------------------------------------------
+      //  Arrive here if failed to acquire a block.
+      //  Must find a new superblock.
+
+      //  Start searching at designated index for this block size.
+      //  Look for superblock that, in preferential order,
+      //  1) part-full superblock of this block size
+      //  2) empty superblock to claim for this block size
+      //  3) part-full superblock of the next larger block size
+
+      sb_state = block_state;  // Expect to find the desired state
+      sb_id    = -1;
+
+      bool update_hint        = false;
+      int32_t sb_id_empty     = -1;
+      int32_t sb_id_large     = -1;
+      uint32_t sb_state_large = 0;
+
+      sb_state_array = m_sb_state_array + sb_id_begin * m_sb_state_size;
+
+      for (int32_t i = 0, id = sb_id_begin; i < m_sb_count; ++i) {
+        //  Query state of the candidate superblock.
+        //  Note that the state may change at any moment
+        //  as concurrent allocations and deallocations occur.
+
+        const uint32_t full_state = *sb_state_array;
+        const uint32_t used       = full_state & state_used_mask;
+        const uint32_t state      = full_state & state_header_mask;
+
+        if (state == block_state) {
+          //  Superblock is assigned to this block size
+
+          if (used < block_count) {
+            // There is room to allocate one block
+
+            sb_id = id;
+
+            // Is there room to allocate more than one block?
+
+            update_hint = used + 1 < block_count;
+
+            break;
+          }
+        } else if (0 == used) {
+          // Superblock is empty
+
+          if (-1 == sb_id_empty) {
+            // Superblock is not assigned to this block size
+            // and is the first empty superblock encountered.
+            // Save this id to use if a part-full superblock is not found.
+
+            sb_id_empty = id;
+          }
+        } else if ((-1 == sb_id_empty /* have not found an empty */) &&
+                   (-1 == sb_id_large /* have not found a larger */) &&
+                   (state < block_state /* a larger block */) &&
+                   // is not full:
+                   (used < (1u << (state >> state_shift)))) {
+          //  First superblock encountered that is
+          //  larger than this block size and
+          //  has room for an allocation.
+          //  Save this id to use if a part-full or empty superblock is not found.
+          sb_id_large    = id;
+          sb_state_large = state;
+        }
+
+        // Iterate around the superblock array:
+
+        if (++id < m_sb_count) {
+          sb_state_array += m_sb_state_size;
+        } else {
+          id             = 0;
+          sb_state_array = m_sb_state_array;
+        }
+      }
+
+      // printf("  search m_sb_count(%d) sb_id(%d) sb_id_empty(%d)
+      // sb_id_large(%d)\n" , m_sb_count , sb_id , sb_id_empty , sb_id_large);
+
+      if (sb_id < 0) {
+        //  Did not find a part-full superblock for this block size.
+
+        if (0 <= sb_id_empty) {
+          //  Found the first empty superblock following the designated one.
+          //  Attempt to claim it for this block size.
+          //  If the claim fails assume that another thread claimed it
+          //  for this block size and try to use it anyway,
+          //  but do not update hint.
+
+          sb_id = sb_id_empty;
+
+          sb_state_array = m_sb_state_array + (sb_id * m_sb_state_size);
+
+          //  If successfully changed assignment of empty superblock 'sb_id'
+          //  to this block_size then update the hint.
+
+          const uint32_t state_empty = state_header_mask & *sb_state_array;
+
+          // If this thread claims the empty block then update the hint
+          update_hint =
+              state_empty == Kokkos::atomic_compare_exchange(
+                                 sb_state_array, state_empty, block_state);
+        } else if (0 <= sb_id_large) {
+          // Found a larger superblock with space available
+
+          sb_id    = sb_id_large;
+          sb_state = sb_state_large;
+
+          sb_state_array = m_sb_state_array + (sb_id * m_sb_state_size);
+        } else {
+          // Did not find a potentially usable superblock
+          --attempt_limit;
+        }
+      }
+
+      if (update_hint) {
+        Kokkos::atomic_compare_exchange(hint_sb_id_ptr, uint32_t(hint_sb_id),
+                                        uint32_t(sb_id));
+      }
+    }  // end allocation attempt loop
+    //--------------------------------------------------------------------
+
+    return p;
+  }
+  // end allocate
+  //--------------------------------------------------------------------------
+
+  /**\brief  Return an allocated block of memory to the pool.
+   *
+   *  Requires: p is a return value from allocate( alloc_size );
+   *
+   *  For now the alloc_size is ignored.
+   */
+  KOKKOS_INLINE_FUNCTION
+  void deallocate(void *p, size_t /* alloc_size */) const noexcept {
+    if (nullptr == p) return;
+
+    // Determine which superblock and block
+    const ptrdiff_t d =
+        static_cast<char *>(p) -
+        reinterpret_cast<char *>(m_sb_state_array + m_data_offset);
+
+    // Verify contained within the memory pool's superblocks:
+    const int ok_contains =
+        (0 <= d) && (size_t(d) < (size_t(m_sb_count) << m_sb_size_lg2));
+
+    int ok_block_aligned = 0;
+    int ok_dealloc_once  = 0;
+
+    if (ok_contains) {
+      const int sb_id = d >> m_sb_size_lg2;
+
+      // State array for the superblock.
+      volatile uint32_t *const sb_state_array =
+          m_sb_state_array + (sb_id * m_sb_state_size);
+
+      const uint32_t block_state = (*sb_state_array) & state_header_mask;
+      const uint32_t block_size_lg2 =
+          m_sb_size_lg2 - (block_state >> state_shift);
+
+      ok_block_aligned = 0 == (d & ((1UL << block_size_lg2) - 1));
+
+      if (ok_block_aligned) {
+        // Map address to block's bit
+        // mask into superblock and then shift down for block index
+
+        const uint32_t bit =
+            (d & ((ptrdiff_t(1) << m_sb_size_lg2) - 1)) >> block_size_lg2;
+
+        const int result = CB::release(sb_state_array, bit, block_state);
+
+        ok_dealloc_once = 0 <= result;
+      }
+    }
+
+    if (!ok_contains || !ok_block_aligned || !ok_dealloc_once) {
+      Kokkos::abort("Kokkos MemoryPool::deallocate given erroneous pointer");
+    }
+  }
+  // end deallocate
+  //--------------------------------------------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  int number_of_superblocks() const noexcept { return m_sb_count; }
+
+  KOKKOS_INLINE_FUNCTION
+  void superblock_state(int sb_id, int &block_size, int &block_count_capacity,
+                        int &block_count_used) const noexcept {
+    block_size           = 0;
+    block_count_capacity = 0;
+    block_count_used     = 0;
+
+    bool can_access_state_array = []() {
+      KOKKOS_IF_ON_HOST(
+          (return SpaceAccessibility<DefaultHostExecutionSpace,
+                                     base_memory_space>::accessible;))
+      KOKKOS_IF_ON_DEVICE(
+          (return SpaceAccessibility<DefaultExecutionSpace,
+                                     base_memory_space>::accessible;))
+    }();
+
+    if (can_access_state_array) {
+      // Can access the state array
+
+      const uint32_t state =
+          ((uint32_t volatile *)m_sb_state_array)[sb_id * m_sb_state_size];
+
+      const uint32_t block_count_lg2 = state >> state_shift;
+      const uint32_t block_used      = state & state_used_mask;
+
+      block_size           = 1LU << (m_sb_size_lg2 - block_count_lg2);
+      block_count_capacity = 1LU << block_count_lg2;
+      block_count_used     = block_used;
+    }
+  }
+};
+
+}  // namespace Kokkos
+
+#endif /* #ifndef KOKKOS_MEMORYPOOL_HPP */
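
The interface added above can be exercised as in the following minimal sketch; the pool size, request size, and execution space here are illustrative assumptions, not taken from this commit:

#include <Kokkos_Core.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    using Device = Kokkos::DefaultExecutionSpace;
    // A ~1 MB pool; block and superblock sizes fall back to the defaults
    // documented in the constructor comment above.
    Kokkos::MemoryPool<Device> pool(Device::memory_space(), 1 << 20);

    Kokkos::parallel_for(
        "use_pool", Kokkos::RangePolicy<Device>(0, 128), KOKKOS_LAMBDA(int) {
          // The request is rounded up to a power-of-two block and may
          // yield nullptr under concurrent exhaustion (see attempt_limit).
          void* p = pool.allocate(256);
          if (p != nullptr) pool.deallocate(p, 256);
        });
    Kokkos::fence();
  }
  Kokkos::finalize();
  return 0;
}
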
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_MemoryTraits.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_MemoryTraits.hpp
new file mode 100644 (file)
index 0000000..079384f
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_MEMORYTRAITS_HPP
+#define KOKKOS_MEMORYTRAITS_HPP
+
+#include <impl/Kokkos_Traits.hpp>
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+/** \brief  Memory access traits for views, an extension point.
+ *
+ *  These traits should be orthogonal.  If there are dependencies then
+ *  the MemoryTraits template must detect and enforce dependencies.
+ *
+ *  A zero value is the default for a View, indicating that none of
+ *  these traits are present.
+ */
+enum MemoryTraitsFlags {
+  Unmanaged    = 0x01,
+  RandomAccess = 0x02,
+  Atomic       = 0x04,
+  Restrict     = 0x08,
+  Aligned      = 0x10
+};
+
+template <unsigned T>
+struct MemoryTraits {
+  //! Tag this class as a Kokkos memory traits type:
+  using memory_traits = MemoryTraits<T>;
+  enum : bool {
+    is_unmanaged = (unsigned(0) != (T & unsigned(Kokkos::Unmanaged)))
+  };
+  enum : bool {
+    is_random_access = (unsigned(0) != (T & unsigned(Kokkos::RandomAccess)))
+  };
+  enum : bool { is_atomic = (unsigned(0) != (T & unsigned(Kokkos::Atomic))) };
+  enum : bool {
+    is_restrict = (unsigned(0) != (T & unsigned(Kokkos::Restrict)))
+  };
+  enum : bool { is_aligned = (unsigned(0) != (T & unsigned(Kokkos::Aligned))) };
+};
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+using MemoryManaged   = Kokkos::MemoryTraits<0>;
+using MemoryUnmanaged = Kokkos::MemoryTraits<Kokkos::Unmanaged>;
+using MemoryRandomAccess =
+    Kokkos::MemoryTraits<Kokkos::Unmanaged | Kokkos::RandomAccess>;
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+static_assert((0 < int(KOKKOS_MEMORY_ALIGNMENT)) &&
+                  (0 == (int(KOKKOS_MEMORY_ALIGNMENT) &
+                         (int(KOKKOS_MEMORY_ALIGNMENT) - 1))),
+              "KOKKOS_MEMORY_ALIGNMENT must be a power of two");
+
+/** \brief Memory alignment settings
+ *
+ *  Sets the global value for memory alignment.  Must be a power of two!
+ *  Enables compatibility of views from different devices with static stride.
+ *  Use a compiler flag to override the default.
+ */
+enum : unsigned {
+  MEMORY_ALIGNMENT           = KOKKOS_MEMORY_ALIGNMENT,
+  MEMORY_ALIGNMENT_THRESHOLD = KOKKOS_MEMORY_ALIGNMENT_THRESHOLD
+};
+
+// ------------------------------------------------------------------ //
+//  this identifies the default memory trait
+//
+template <typename Tp>
+struct is_default_memory_trait : std::false_type {};
+
+template <>
+struct is_default_memory_trait<Kokkos::MemoryTraits<0>> : std::true_type {};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif /* #ifndef KOKKOS_MEMORYTRAITS_HPP */
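
These traits are supplied as an optional View template argument; a small sketch of the two most common flags follows (function and alias names are illustrative):

#include <Kokkos_Core.hpp>

// Unmanaged views wrap user-owned memory: no reference counting and no
// deallocation when the view goes out of scope.
void wrap_raw(double* raw, int n) {
  Kokkos::View<double*, Kokkos::HostSpace,
               Kokkos::MemoryTraits<Kokkos::Unmanaged>>
      v(raw, n);
  (void)v;
}

// Read-only data tagged RandomAccess may be routed through texture or
// read-only cache paths on GPU backends.
using ConstRandomAccessView =
    Kokkos::View<const double*, Kokkos::MemoryTraits<Kokkos::RandomAccess>>;
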
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_MinMaxClamp.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_MinMaxClamp.hpp
new file mode 100644 (file)
index 0000000..6cb8d16
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_MIN_MAX_CLAMP_HPP
+#define KOKKOS_MIN_MAX_CLAMP_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Pair.hpp>
+
+#include <initializer_list>
+
+namespace Kokkos {
+
+// clamp
+template <class T>
+constexpr KOKKOS_INLINE_FUNCTION const T& clamp(const T& value, const T& lo,
+                                                const T& hi) {
+  KOKKOS_EXPECTS(!(hi < lo));
+  return (value < lo) ? lo : (hi < value) ? hi : value;
+}
+
+template <class T, class ComparatorType>
+constexpr KOKKOS_INLINE_FUNCTION const T& clamp(const T& value, const T& lo,
+                                                const T& hi,
+                                                ComparatorType comp) {
+  KOKKOS_EXPECTS(!comp(hi, lo));
+  return comp(value, lo) ? lo : comp(hi, value) ? hi : value;
+}
+
+// max
+template <class T>
+constexpr KOKKOS_INLINE_FUNCTION const T& max(const T& a, const T& b) {
+  return (a < b) ? b : a;
+}
+
+template <class T, class ComparatorType>
+constexpr KOKKOS_INLINE_FUNCTION const T& max(const T& a, const T& b,
+                                              ComparatorType comp) {
+  return comp(a, b) ? b : a;
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION constexpr T max(std::initializer_list<T> ilist) {
+  auto first      = ilist.begin();
+  auto const last = ilist.end();
+  auto result     = *first;
+  if (first == last) return result;
+  while (++first != last) {
+    if (result < *first) result = *first;
+  }
+  return result;
+}
+
+template <class T, class Compare>
+KOKKOS_INLINE_FUNCTION constexpr T max(std::initializer_list<T> ilist,
+                                       Compare comp) {
+  auto first      = ilist.begin();
+  auto const last = ilist.end();
+  auto result     = *first;
+  if (first == last) return result;
+  while (++first != last) {
+    if (comp(result, *first)) result = *first;
+  }
+  return result;
+}
+
+// min
+template <class T>
+constexpr KOKKOS_INLINE_FUNCTION const T& min(const T& a, const T& b) {
+  return (b < a) ? b : a;
+}
+
+template <class T, class ComparatorType>
+constexpr KOKKOS_INLINE_FUNCTION const T& min(const T& a, const T& b,
+                                              ComparatorType comp) {
+  return comp(b, a) ? b : a;
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION constexpr T min(std::initializer_list<T> ilist) {
+  auto first      = ilist.begin();
+  auto const last = ilist.end();
+  auto result     = *first;
+  if (first == last) return result;
+  while (++first != last) {
+    if (*first < result) result = *first;
+  }
+  return result;
+}
+
+template <class T, class Compare>
+KOKKOS_INLINE_FUNCTION constexpr T min(std::initializer_list<T> ilist,
+                                       Compare comp) {
+  auto first      = ilist.begin();
+  auto const last = ilist.end();
+  auto result     = *first;
+  if (first == last) return result;
+  while (++first != last) {
+    if (comp(*first, result)) result = *first;
+  }
+  return result;
+}
+
+// minmax
+template <class T>
+constexpr KOKKOS_INLINE_FUNCTION auto minmax(const T& a, const T& b) {
+  using return_t = ::Kokkos::pair<const T&, const T&>;
+  return (b < a) ? return_t{b, a} : return_t{a, b};
+}
+
+template <class T, class ComparatorType>
+constexpr KOKKOS_INLINE_FUNCTION auto minmax(const T& a, const T& b,
+                                             ComparatorType comp) {
+  using return_t = ::Kokkos::pair<const T&, const T&>;
+  return comp(b, a) ? return_t{b, a} : return_t{a, b};
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION constexpr Kokkos::pair<T, T> minmax(
+    std::initializer_list<T> ilist) {
+  auto first      = ilist.begin();
+  auto const last = ilist.end();
+  auto next       = first;
+  Kokkos::pair<T, T> result{*first, *first};
+  if (first == last || ++next == last) return result;
+  if (*next < *first)
+    result.first = *next;
+  else
+    result.second = *next;
+  first = next;
+  while (++first != last) {
+    if (++next == last) {
+      if (*first < result.first)
+        result.first = *first;
+      else if (!(*first < result.second))
+        result.second = *first;
+      break;
+    }
+    if (*next < *first) {
+      if (*next < result.first) result.first = *next;
+      if (!(*first < result.second)) result.second = *first;
+    } else {
+      if (*first < result.first) result.first = *first;
+      if (!(*next < result.second)) result.second = *next;
+    }
+    first = next;
+  }
+  return result;
+}
+
+template <class T, class Compare>
+KOKKOS_INLINE_FUNCTION constexpr Kokkos::pair<T, T> minmax(
+    std::initializer_list<T> ilist, Compare comp) {
+  auto first      = ilist.begin();
+  auto const last = ilist.end();
+  auto next       = first;
+  Kokkos::pair<T, T> result{*first, *first};
+  if (first == last || ++next == last) return result;
+  if (comp(*next, *first))
+    result.first = *next;
+  else
+    result.second = *next;
+  first = next;
+  while (++first != last) {
+    if (++next == last) {
+      if (comp(*first, result.first))
+        result.first = *first;
+      else if (!comp(*first, result.second))
+        result.second = *first;
+      break;
+    }
+    if (comp(*next, *first)) {
+      if (comp(*next, result.first)) result.first = *next;
+      if (!comp(*first, result.second)) result.second = *first;
+    } else {
+      if (comp(*first, result.first)) result.first = *first;
+      if (!comp(*next, result.second)) result.second = *next;
+    }
+    first = next;
+  }
+  return result;
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+namespace Experimental {
+using ::Kokkos::clamp;
+using ::Kokkos::max;
+using ::Kokkos::min;
+using ::Kokkos::minmax;
+}  // namespace Experimental
+#endif
+
+}  // namespace Kokkos
+
+#endif
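
Unlike their std:: counterparts, these overloads are callable from device code; on the host they behave as one would expect. A minimal sketch (values illustrative):

#include <Kokkos_Core.hpp>
#include <cassert>

int main() {
  assert(Kokkos::clamp(5, 0, 3) == 3);  // value above [lo, hi] clamps to hi
  assert(Kokkos::min(2, 7) == 2);
  assert(Kokkos::max({1, 9, 4}) == 9);  // initializer_list overload

  // minmax returns a Kokkos::pair of references; use named operands so
  // the references do not dangle past the full expression.
  int a = 8, b = 3;
  const auto mm = Kokkos::minmax(a, b);
  assert(mm.first == 3 && mm.second == 8);
  return 0;
}
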
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_NumericTraits.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_NumericTraits.hpp
new file mode 100644 (file)
index 0000000..e529aba
--- /dev/null
@@ -0,0 +1,661 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_NUMERIC_TRAITS_HPP
+#define KOKKOS_NUMERIC_TRAITS_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_NUMERICTRAITS
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <cfloat>
+#include <climits>
+#include <cmath>
+#include <cstdint>
+#include <type_traits>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+// clang-format off
+template <class> struct infinity_helper {};
+template <> struct infinity_helper<float> { static constexpr float value = HUGE_VALF; };
+template <> struct infinity_helper<double> { static constexpr double value = HUGE_VAL; };
+template <> struct infinity_helper<long double> { static constexpr long double value = HUGE_VALL; };
+template <class> struct finite_min_helper {};
+template <> struct finite_min_helper<bool> { static constexpr bool value = false; };
+template <> struct finite_min_helper<char> { static constexpr char value = CHAR_MIN; };
+template <> struct finite_min_helper<signed char> { static constexpr signed char value = SCHAR_MIN; };
+template <> struct finite_min_helper<unsigned char> { static constexpr unsigned char value = 0; };
+template <> struct finite_min_helper<short> { static constexpr short value = SHRT_MIN; };
+template <> struct finite_min_helper<unsigned short> { static constexpr unsigned short value = 0; };
+template <> struct finite_min_helper<int> { static constexpr int value = INT_MIN; };
+template <> struct finite_min_helper<unsigned int> { static constexpr unsigned int value = 0; };
+template <> struct finite_min_helper<long int> { static constexpr long int value = LONG_MIN; };
+template <> struct finite_min_helper<unsigned long int> { static constexpr unsigned long int value = 0; };
+template <> struct finite_min_helper<long long int> { static constexpr long long int value = LLONG_MIN; };
+template <> struct finite_min_helper<unsigned long long int> { static constexpr unsigned long long int value = 0; };
+template <> struct finite_min_helper<float> { static constexpr float value = -FLT_MAX; };
+template <> struct finite_min_helper<double> { static constexpr double value = -DBL_MAX; };
+template <> struct finite_min_helper<long double> { static constexpr long double value = -LDBL_MAX; };
+template <class> struct finite_max_helper {};
+template <> struct finite_max_helper<bool> { static constexpr bool value = true; };
+template <> struct finite_max_helper<char> { static constexpr char value = CHAR_MAX; };
+template <> struct finite_max_helper<signed char> { static constexpr signed char value = SCHAR_MAX; };
+template <> struct finite_max_helper<unsigned char> { static constexpr unsigned char value = UCHAR_MAX; };
+template <> struct finite_max_helper<short> { static constexpr short value = SHRT_MAX; };
+template <> struct finite_max_helper<unsigned short> { static constexpr unsigned short value = USHRT_MAX; };
+template <> struct finite_max_helper<int> { static constexpr int value = INT_MAX; };
+template <> struct finite_max_helper<unsigned int> { static constexpr unsigned int value = UINT_MAX; };
+template <> struct finite_max_helper<long int> { static constexpr long int value = LONG_MAX; };
+template <> struct finite_max_helper<unsigned long int> { static constexpr unsigned long int value = ULONG_MAX; };
+template <> struct finite_max_helper<long long int> { static constexpr long long int value = LLONG_MAX; };
+template <> struct finite_max_helper<unsigned long long int> { static constexpr unsigned long long int value = ULLONG_MAX; };
+template <> struct finite_max_helper<float> { static constexpr float value = FLT_MAX; };
+template <> struct finite_max_helper<double> { static constexpr double value = DBL_MAX; };
+template <> struct finite_max_helper<long double> { static constexpr long double value = LDBL_MAX; };
+template <class> struct epsilon_helper {};
+namespace{
+  // FIXME workaround for LDBL_EPSILON with XL
+  template<typename T>
+  constexpr T machineeps() {
+    T epsilon = 1, prev = 1, expression = 1;
+    do {
+      prev = epsilon;
+      epsilon /= 2;
+      expression = 1 + epsilon;
+    } while (expression > 1);
+    return prev;
+  }
+}
+template <> struct epsilon_helper<float> { static constexpr float value = FLT_EPSILON; };
+template <> struct epsilon_helper<double> { static constexpr double value = DBL_EPSILON; };
+template <> struct epsilon_helper<long double> {
+#ifdef KOKKOS_COMPILER_IBM
+  static constexpr long double value = machineeps<long double>();
+#else
+  static constexpr long double value = LDBL_EPSILON;
+#endif
+};
+template <class> struct round_error_helper {};
+template <> struct round_error_helper<float> { static constexpr float value = 0.5F; };
+template <> struct round_error_helper<double> { static constexpr double value = 0.5; };
+template <> struct round_error_helper<long double> { static constexpr long double value = 0.5L; };
+template <class> struct norm_min_helper {};
+template <> struct norm_min_helper<float> { static constexpr float value = FLT_MIN; };
+template <> struct norm_min_helper<double> { static constexpr double value = DBL_MIN; };
+template <> struct norm_min_helper<long double> { static constexpr long double value = LDBL_MIN; };
+template <class> struct denorm_min_helper {};
+//                               Workaround for GCC <9.2, Clang <9, Intel
+//                               vvvvvvvvvvvvvvvvvvvvvvvvv
+#if defined(KOKKOS_ENABLE_CXX17) && defined (FLT_TRUE_MIN) || defined(_MSC_VER)
+template <> struct denorm_min_helper<float> { static constexpr float value = FLT_TRUE_MIN; };
+template <> struct denorm_min_helper<double> { static constexpr double value = DBL_TRUE_MIN; };
+template <> struct denorm_min_helper<long double> { static constexpr long double value = LDBL_TRUE_MIN; };
+#else
+template <> struct denorm_min_helper<float> { static constexpr float value = __FLT_DENORM_MIN__; };
+template <> struct denorm_min_helper<double> { static constexpr double value = __DBL_DENORM_MIN__; };
+template <> struct denorm_min_helper<long double> { static constexpr long double value = __LDBL_DENORM_MIN__; };
+#endif
+// GCC <10.3 is not able to evaluate T(1) / finite_max_v<T> at compile time when passing -frounding-math
+// https://godbolt.org/z/zj9svb1T7
+// Similar issue was reported on IBM Power without the compiler option
+#define KOKKOS_IMPL_WORKAROUND_CONSTANT_EXPRESSION_COMPILER_BUG
+#ifndef KOKKOS_IMPL_WORKAROUND_CONSTANT_EXPRESSION_COMPILER_BUG
+// NOTE see ?lamch routine from LAPACK that determines machine parameters for floating-point arithmetic
+template <class T>
+constexpr T safe_minimum(T /*ignored*/) {
+  constexpr auto one  = static_cast<T>(1);
+  constexpr auto eps  = epsilon_helper<T>::value;
+  constexpr auto tiny = norm_min_helper<T>::value;
+  constexpr auto huge = finite_max_helper<T>::value;
+  constexpr auto small = one / huge;  // error: is not a constant expression
+  return small >= tiny ? small * (one + eps) : tiny;
+}
+template <class> struct reciprocal_overflow_threshold_helper {};
+template <> struct reciprocal_overflow_threshold_helper<float> { static constexpr float value = safe_minimum(0.f); };
+template <> struct reciprocal_overflow_threshold_helper<double> { static constexpr double value = safe_minimum(0.); };
+template <> struct reciprocal_overflow_threshold_helper<long double> { static constexpr long double value = safe_minimum(0.l); };
+#else
+template <class> struct reciprocal_overflow_threshold_helper {};
+template <> struct reciprocal_overflow_threshold_helper<float> { static constexpr float value = norm_min_helper<float>::value; };  // OK for IEEE-754 floating-point numbers
+template <> struct reciprocal_overflow_threshold_helper<double> { static constexpr double value = norm_min_helper<double>::value; };
+template <> struct reciprocal_overflow_threshold_helper<long double> { static constexpr long double value = norm_min_helper<long double>::value; };
+#endif
+#undef KOKKOS_IMPL_WORKAROUND_CONSTANT_EXPRESSION_COMPILER_BUG
+template <class> struct quiet_NaN_helper {};
+template <> struct quiet_NaN_helper<float> { static constexpr float value = __builtin_nanf(""); };
+template <> struct quiet_NaN_helper<double> { static constexpr double value = __builtin_nan(""); };
+#if defined(_MSC_VER)
+template <> struct quiet_NaN_helper<long double> { static constexpr long double value = __builtin_nan(""); };
+#else
+template <> struct quiet_NaN_helper<long double> { static constexpr long double value = __builtin_nanl(""); };
+#endif
+template <class> struct signaling_NaN_helper {};
+template <> struct signaling_NaN_helper<float> { static constexpr float value = __builtin_nansf(""); };
+template <> struct signaling_NaN_helper<double> { static constexpr double value = __builtin_nans(""); };
+#if defined(_MSC_VER)
+template <> struct signaling_NaN_helper<long double> { static constexpr long double value = __builtin_nans(""); };
+#else
+template <> struct signaling_NaN_helper<long double> { static constexpr long double value = __builtin_nansl(""); };
+#endif
+template <class> struct digits_helper {};
+template <> struct digits_helper<bool> { static constexpr int value = 1; };
+template <> struct digits_helper<char> { static constexpr int value = CHAR_BIT - std::is_signed<char>::value; };
+template <> struct digits_helper<signed char> { static constexpr int value = CHAR_BIT - 1; };
+template <> struct digits_helper<unsigned char> { static constexpr int value = CHAR_BIT; };
+template <> struct digits_helper<short> { static constexpr int value = CHAR_BIT*sizeof(short)-1; };
+template <> struct digits_helper<unsigned short> { static constexpr int value = CHAR_BIT*sizeof(short); };
+template <> struct digits_helper<int> { static constexpr int value = CHAR_BIT*sizeof(int)-1; };
+template <> struct digits_helper<unsigned int> { static constexpr int value = CHAR_BIT*sizeof(int); };
+template <> struct digits_helper<long int> { static constexpr int value = CHAR_BIT*sizeof(long int)-1; };
+template <> struct digits_helper<unsigned long int> { static constexpr int value = CHAR_BIT*sizeof(long int); };
+template <> struct digits_helper<long long int> { static constexpr int value = CHAR_BIT*sizeof(long long int)-1; };
+template <> struct digits_helper<unsigned long long int> { static constexpr int value = CHAR_BIT*sizeof(long long int); };
+template <> struct digits_helper<float> { static constexpr int value = FLT_MANT_DIG; };
+template <> struct digits_helper<double> { static constexpr int value = DBL_MANT_DIG; };
+template <> struct digits_helper<long double> { static constexpr int value = LDBL_MANT_DIG; };
+template <class> struct digits10_helper {};
+template <> struct digits10_helper<bool> { static constexpr int value = 0; };
+// The fraction 643/2136 approximates log10(2) to 7 significant digits.
+// Works around a GCC compiler bug with -frounding-math that prevented the
+// floating-point expression from being evaluated at compile time.
+#define DIGITS10_HELPER_INTEGRAL(TYPE) \
+template <> struct digits10_helper<TYPE> { static constexpr int value = digits_helper<TYPE>::value * 643L / 2136; };
+DIGITS10_HELPER_INTEGRAL(char)
+DIGITS10_HELPER_INTEGRAL(signed char)
+DIGITS10_HELPER_INTEGRAL(unsigned char)
+DIGITS10_HELPER_INTEGRAL(short)
+DIGITS10_HELPER_INTEGRAL(unsigned short)
+DIGITS10_HELPER_INTEGRAL(int)
+DIGITS10_HELPER_INTEGRAL(unsigned int)
+DIGITS10_HELPER_INTEGRAL(long int)
+DIGITS10_HELPER_INTEGRAL(unsigned long int)
+DIGITS10_HELPER_INTEGRAL(long long int)
+DIGITS10_HELPER_INTEGRAL(unsigned long long int)
+#undef DIGITS10_HELPER_INTEGRAL
+template <> struct digits10_helper<float> { static constexpr int value = FLT_DIG; };
+template <> struct digits10_helper<double> { static constexpr int value = DBL_DIG; };
+template <> struct digits10_helper<long double> { static constexpr int value = LDBL_DIG; };
+template <class> struct max_digits10_helper {};
+// Approximate ceil(digits<T>::value * log10(2) + 1)
+#define MAX_DIGITS10_HELPER(TYPE) \
+template <> struct max_digits10_helper<TYPE> { static constexpr int value = (digits_helper<TYPE>::value * 643L + 2135) / 2136 + 1; };
+#ifdef FLT_DECIMAL_DIG
+template <> struct max_digits10_helper<float> { static constexpr int value = FLT_DECIMAL_DIG; };
+#else
+MAX_DIGITS10_HELPER(float)
+#endif
+#ifdef DBL_DECIMAL_DIG
+template <> struct max_digits10_helper<double> { static constexpr int value = DBL_DECIMAL_DIG; };
+#else
+MAX_DIGITS10_HELPER(double)
+#endif
+#ifdef DECIMAL_DIG
+template <> struct max_digits10_helper<long double> { static constexpr int value = DECIMAL_DIG; };
+#elif LDBL_DECIMAL_DIG
+template <> struct max_digits10_helper<long double> { static constexpr int value = LDBL_DECIMAL_DIG; };
+#else
+MAX_DIGITS10_HELPER(long double)
+#endif
+#undef MAX_DIGITS10_HELPER
+template <class> struct radix_helper {};
+template <> struct radix_helper<bool> { static constexpr int value = 2; };
+template <> struct radix_helper<char> { static constexpr int value = 2; };
+template <> struct radix_helper<signed char> { static constexpr int value = 2; };
+template <> struct radix_helper<unsigned char> { static constexpr int value = 2; };
+template <> struct radix_helper<short> { static constexpr int value = 2; };
+template <> struct radix_helper<unsigned short> { static constexpr int value = 2; };
+template <> struct radix_helper<int> { static constexpr int value = 2; };
+template <> struct radix_helper<unsigned int> { static constexpr int value = 2; };
+template <> struct radix_helper<long int> { static constexpr int value = 2; };
+template <> struct radix_helper<unsigned long int> { static constexpr int value = 2; };
+template <> struct radix_helper<long long int> { static constexpr int value = 2; };
+template <> struct radix_helper<unsigned long long int> { static constexpr int value = 2; };
+template <> struct radix_helper<float> { static constexpr int value = FLT_RADIX; };
+template <> struct radix_helper<double> { static constexpr int value = FLT_RADIX; };
+template <> struct radix_helper<long double> { static constexpr int value = FLT_RADIX; };
+template <class> struct min_exponent_helper {};
+template <> struct min_exponent_helper<float> { static constexpr int value = FLT_MIN_EXP; };
+template <> struct min_exponent_helper<double> { static constexpr int value = DBL_MIN_EXP; };
+template <> struct min_exponent_helper<long double> { static constexpr int value = LDBL_MIN_EXP; };
+template <class> struct min_exponent10_helper {};
+template <> struct min_exponent10_helper<float> { static constexpr int value = FLT_MIN_10_EXP; };
+template <> struct min_exponent10_helper<double> { static constexpr int value = DBL_MIN_10_EXP; };
+template <> struct min_exponent10_helper<long double> { static constexpr int value = LDBL_MIN_10_EXP; };
+template <class> struct max_exponent_helper {};
+template <> struct max_exponent_helper<float> { static constexpr int value = FLT_MAX_EXP; };
+template <> struct max_exponent_helper<double> { static constexpr int value = DBL_MAX_EXP; };
+template <> struct max_exponent_helper<long double> { static constexpr int value = LDBL_MAX_EXP; };
+template <class> struct max_exponent10_helper{};
+template <> struct max_exponent10_helper<float> { static constexpr int value = FLT_MAX_10_EXP; };
+template <> struct max_exponent10_helper<double> { static constexpr int value = DBL_MAX_10_EXP; };
+template <> struct max_exponent10_helper<long double> { static constexpr int value = LDBL_MAX_10_EXP; };
+// clang-format on
+}  // namespace Impl
+
+#if defined(KOKKOS_ENABLE_CXX17)
+#define KOKKOS_IMPL_DEFINE_TRAIT(TRAIT)                        \
+  template <class T>                                           \
+  struct TRAIT : Impl::TRAIT##_helper<std::remove_cv_t<T>> {}; \
+  template <class T>                                           \
+  inline constexpr auto TRAIT##_v = TRAIT<T>::value;
+#else
+#define KOKKOS_IMPL_DEFINE_TRAIT(TRAIT) \
+  template <class T>                    \
+  struct TRAIT : Impl::TRAIT##_helper<std::remove_cv_t<T>> {};
+#endif
+
+// Numeric distinguished value traits
+KOKKOS_IMPL_DEFINE_TRAIT(infinity)
+KOKKOS_IMPL_DEFINE_TRAIT(finite_min)
+KOKKOS_IMPL_DEFINE_TRAIT(finite_max)
+KOKKOS_IMPL_DEFINE_TRAIT(epsilon)
+KOKKOS_IMPL_DEFINE_TRAIT(round_error)
+KOKKOS_IMPL_DEFINE_TRAIT(norm_min)
+KOKKOS_IMPL_DEFINE_TRAIT(denorm_min)
+KOKKOS_IMPL_DEFINE_TRAIT(reciprocal_overflow_threshold)
+KOKKOS_IMPL_DEFINE_TRAIT(quiet_NaN)
+KOKKOS_IMPL_DEFINE_TRAIT(signaling_NaN)
+
+// Numeric characteristics traits
+KOKKOS_IMPL_DEFINE_TRAIT(digits)
+KOKKOS_IMPL_DEFINE_TRAIT(digits10)
+KOKKOS_IMPL_DEFINE_TRAIT(max_digits10)
+KOKKOS_IMPL_DEFINE_TRAIT(radix)
+KOKKOS_IMPL_DEFINE_TRAIT(min_exponent)
+KOKKOS_IMPL_DEFINE_TRAIT(min_exponent10)
+KOKKOS_IMPL_DEFINE_TRAIT(max_exponent)
+KOKKOS_IMPL_DEFINE_TRAIT(max_exponent10)
+
+#undef KOKKOS_IMPL_DEFINE_TRAIT
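+// Example (a sketch for illustration, not part of the upstream header):
+// each trait defined by the macro above is usable as a class template
+// and, when C++17 is enabled, as a matching _v variable template:
+//
+//   static_assert(Kokkos::Experimental::finite_max<int>::value == INT_MAX);
+//   static_assert(Kokkos::Experimental::finite_max_v<int> == INT_MAX);
+//   static_assert(Kokkos::Experimental::epsilon_v<float> == FLT_EPSILON);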
+
+}  // namespace Experimental
+
+template <class T>
+struct reduction_identity; /*{
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T sum() { return T(); }  // 0
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T prod()  // 1
+    { static_assert( false, "Missing specialization of
+Kokkos::reduction_identity for custom prod reduction type"); return T(); }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T max()   // minimum value
+    { static_assert( false, "Missing specialization of
+Kokkos::reduction_identity for custom max reduction type"); return T(); }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T min()   // maximum value
+    { static_assert( false, "Missing specialization of
+Kokkos::reduction_identity for custom min reduction type"); return T(); }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T bor()   // 0, only for integer
+type { static_assert( false, "Missing specialization of
+Kokkos::reduction_identity for custom bor reduction type"); return T(); }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T band()  // !0, only for integer
+type { static_assert( false, "Missing specialization of
+Kokkos::reduction_identity for custom band reduction type"); return T(); }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T lor()   // 0, only for integer
+type { static_assert( false, "Missing specialization of
+Kokkos::reduction_identity for custom lor reduction type"); return T(); }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static T land()  // !0, only for integer
+type { static_assert( false, "Missing specialization of
+Kokkos::reduction_identity for custom land reduction type"); return T(); }
+};*/
+
+template <>
+struct reduction_identity<signed char> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char sum() {
+    return static_cast<signed char>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char prod() {
+    return static_cast<signed char>(1);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char max() {
+    return SCHAR_MIN;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char min() {
+    return SCHAR_MAX;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char bor() {
+    return static_cast<signed char>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char band() {
+    return ~static_cast<signed char>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char lor() {
+    return static_cast<signed char>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static signed char land() {
+    return static_cast<signed char>(1);
+  }
+};
+
+template <>
+struct reduction_identity<bool> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static bool lor() {
+    return static_cast<bool>(false);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static bool land() {
+    return static_cast<bool>(true);
+  }
+};
+
+template <>
+struct reduction_identity<short> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static short sum() {
+    return static_cast<short>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static short prod() {
+    return static_cast<short>(1);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static short max() { return SHRT_MIN; }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static short min() { return SHRT_MAX; }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static short bor() {
+    return static_cast<short>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static short band() {
+    return ~static_cast<short>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static short lor() {
+    return static_cast<short>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static short land() {
+    return static_cast<short>(1);
+  }
+};
+
+template <>
+struct reduction_identity<int> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static int sum() {
+    return static_cast<int>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static int prod() {
+    return static_cast<int>(1);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static int max() { return INT_MIN; }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static int min() { return INT_MAX; }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static int bor() {
+    return static_cast<int>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static int band() {
+    return ~static_cast<int>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static int lor() {
+    return static_cast<int>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static int land() {
+    return static_cast<int>(1);
+  }
+};
+
+template <>
+struct reduction_identity<long> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long sum() {
+    return static_cast<long>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long prod() {
+    return static_cast<long>(1);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long max() { return LONG_MIN; }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long min() { return LONG_MAX; }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long bor() {
+    return static_cast<long>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long band() {
+    return ~static_cast<long>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long lor() {
+    return static_cast<long>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long land() {
+    return static_cast<long>(1);
+  }
+};
+
+template <>
+struct reduction_identity<long long> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long long sum() {
+    return static_cast<long long>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long long prod() {
+    return static_cast<long long>(1);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long long max() {
+    return LLONG_MIN;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long long min() {
+    return LLONG_MAX;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long long bor() {
+    return static_cast<long long>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long long band() {
+    return ~static_cast<long long>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long long lor() {
+    return static_cast<long long>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static long long land() {
+    return static_cast<long long>(1);
+  }
+};
+
+template <>
+struct reduction_identity<unsigned char> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char sum() {
+    return static_cast<unsigned char>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char prod() {
+    return static_cast<unsigned char>(1);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char max() {
+    return static_cast<unsigned char>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char min() {
+    return UCHAR_MAX;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char bor() {
+    return static_cast<unsigned char>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char band() {
+    return ~static_cast<unsigned char>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char lor() {
+    return static_cast<unsigned char>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned char land() {
+    return static_cast<unsigned char>(1);
+  }
+};
+
+template <>
+struct reduction_identity<unsigned short> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short sum() {
+    return static_cast<unsigned short>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short prod() {
+    return static_cast<unsigned short>(1);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short max() {
+    return static_cast<unsigned short>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short min() {
+    return USHRT_MAX;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short bor() {
+    return static_cast<unsigned short>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short band() {
+    return ~static_cast<unsigned short>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short lor() {
+    return static_cast<unsigned short>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned short land() {
+    return static_cast<unsigned short>(1);
+  }
+};
+
+template <>
+struct reduction_identity<unsigned int> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int sum() {
+    return static_cast<unsigned int>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int prod() {
+    return static_cast<unsigned int>(1);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int max() {
+    return static_cast<unsigned int>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int min() {
+    return UINT_MAX;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int bor() {
+    return static_cast<unsigned int>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int band() {
+    return ~static_cast<unsigned int>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int lor() {
+    return static_cast<unsigned int>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned int land() {
+    return static_cast<unsigned int>(1);
+  }
+};
+
+template <>
+struct reduction_identity<unsigned long> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long sum() {
+    return static_cast<unsigned long>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long prod() {
+    return static_cast<unsigned long>(1);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long max() {
+    return static_cast<unsigned long>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long min() {
+    return ULONG_MAX;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long bor() {
+    return static_cast<unsigned long>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long band() {
+    return ~static_cast<unsigned long>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long lor() {
+    return static_cast<unsigned long>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long land() {
+    return static_cast<unsigned long>(1);
+  }
+};
+
+template <>
+struct reduction_identity<unsigned long long> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long sum() {
+    return static_cast<unsigned long long>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long prod() {
+    return static_cast<unsigned long long>(1);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long max() {
+    return static_cast<unsigned long long>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long min() {
+    return ULLONG_MAX;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long bor() {
+    return static_cast<unsigned long long>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long band() {
+    return ~static_cast<unsigned long long>(0x0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long lor() {
+    return static_cast<unsigned long long>(0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static unsigned long long land() {
+    return static_cast<unsigned long long>(1);
+  }
+};
+
+template <>
+struct reduction_identity<float> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float sum() {
+    return static_cast<float>(0.0f);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float prod() {
+    return static_cast<float>(1.0f);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float max() { return -FLT_MAX; }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static float min() { return FLT_MAX; }
+};
+
+template <>
+struct reduction_identity<double> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static double sum() {
+    return static_cast<double>(0.0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static double prod() {
+    return static_cast<double>(1.0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static double max() { return -DBL_MAX; }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static double min() { return DBL_MAX; }
+};
+
+// No __host__ __device__ annotation because long double is treated as double
+// in device code.  May be revisited later if that is no longer the case.
+template <>
+struct reduction_identity<long double> {
+  constexpr static long double sum() { return static_cast<long double>(0.0); }
+  constexpr static long double prod() { return static_cast<long double>(1.0); }
+  constexpr static long double max() { return -LDBL_MAX; }
+  constexpr static long double min() { return LDBL_MAX; }
+};
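+
+// Editorial sketch, not part of Kokkos: these identity values are what
+// reducer implementations use to seed per-thread partial results before
+// joining them.  A hand-written max reduction over double, suitable for
+// parallel_reduce, might look like the following (data and its length are
+// assumed to exist):
+//
+//   struct MaxFunctor {
+//     const double* data;
+//     KOKKOS_INLINE_FUNCTION
+//     void operator()(const int i, double& partial) const {
+//       if (partial < data[i]) partial = data[i];
+//     }
+//     // Seed with the identity (-DBL_MAX), not with 0.
+//     KOKKOS_INLINE_FUNCTION
+//     void init(double& partial) const {
+//       partial = Kokkos::reduction_identity<double>::max();
+//     }
+//     KOKKOS_INLINE_FUNCTION
+//     void join(double& dest, const double& src) const {
+//       if (dest < src) dest = src;
+//     }
+//   };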
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_NUMERICTRAITS
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_NUMERICTRAITS
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_OpenMP.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_OpenMP.hpp
new file mode 100644 (file)
index 0000000..775b470
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_OPENMP_HPP
+#define KOKKOS_OPENMP_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_OPENMP)
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <cstddef>
+#include <iosfwd>
+#include <Kokkos_HostSpace.hpp>
+
+#ifdef KOKKOS_ENABLE_HBWSPACE
+#include <Kokkos_HBWSpace.hpp>
+#endif
+
+#include <Kokkos_ScratchSpace.hpp>
+#include <Kokkos_Parallel.hpp>
+#include <Kokkos_TaskScheduler.hpp>
+#include <Kokkos_Layout.hpp>
+#include <impl/Kokkos_HostSharedPtr.hpp>
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <impl/Kokkos_InitializationSettings.hpp>
+
+#include <vector>
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+namespace Impl {
+class OpenMPInternal;
+}
+
+/// \class OpenMP
+/// \brief Kokkos device for multicore processors in the host memory space.
+class OpenMP {
+ public:
+  //! Tag this class as a kokkos execution space
+  using execution_space = OpenMP;
+
+  using memory_space =
+#ifdef KOKKOS_ENABLE_HBWSPACE
+      Experimental::HBWSpace;
+#else
+      HostSpace;
+#endif
+
+  //! This execution space's preferred device_type
+  using device_type          = Kokkos::Device<execution_space, memory_space>;
+  using array_layout         = LayoutRight;
+  using size_type            = memory_space::size_type;
+  using scratch_memory_space = ScratchMemorySpace<OpenMP>;
+
+  OpenMP();
+
+  /// \brief Print configuration information to the given output stream.
+  void print_configuration(std::ostream& os, bool verbose = false) const;
+
+  /// \brief Is the instance running a parallel algorithm?
+  inline static bool in_parallel(OpenMP const& = OpenMP()) noexcept;
+
+  /// \brief Wait until all dispatched functors complete on the given instance
+  ///
+  ///  This is a no-op on OpenMP
+  static void impl_static_fence(std::string const& name);
+
+  void fence(std::string const& name =
+                 "Kokkos::OpenMP::fence: Unnamed Instance Fence") const;
+
+  /// \brief Does the given instance return immediately after launching
+  /// a parallel algorithm?
+  ///
+  /// This always returns false on OpenMP
+  inline static bool is_asynchronous(OpenMP const& = OpenMP()) noexcept;
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  /// \brief Partition the default instance into new instances without creating
+  ///  new masters
+  ///
+  /// This is a no-op on OpenMP since the default instance cannot be partitioned
+  /// without promoting other threads to 'master'
+  static std::vector<OpenMP> partition(...);
+
+  /// Non-default instances should be ref-counted so that when the last
+  /// is destroyed the instance resources are released
+  ///
+  /// This is a no-op on OpenMP since a non default instance cannot be created
+  static OpenMP create_instance(...);
+
+  /// \brief Partition the default instance and call 'f' on each new 'master'
+  /// thread
+  ///
+  /// Func is a functor with the following signature
+  ///   void( int partition_id, int num_partitions )
+  template <typename F>
+  KOKKOS_DEPRECATED static void partition_master(
+      F const& f, int requested_num_partitions = 0,
+      int requested_partition_size = 0);
+#endif
+
+  // use UniqueToken
+  static int concurrency();
+
+  static void impl_initialize(InitializationSettings const&);
+
+  /// \brief Is the default execution space initialized for the current
+  /// 'master' thread?
+  static bool impl_is_initialized() noexcept;
+
+  /// \brief Free any resources being consumed by the default execution space
+  static void impl_finalize();
+
+  inline static int impl_thread_pool_size() noexcept;
+
+  /** \brief  The rank of the executing thread in this thread pool */
+  KOKKOS_INLINE_FUNCTION
+  static int impl_thread_pool_rank() noexcept;
+
+  inline static int impl_thread_pool_size(int depth);
+
+  // use UniqueToken
+  inline static int impl_max_hardware_threads() noexcept;
+
+  // use UniqueToken
+  KOKKOS_INLINE_FUNCTION
+  static int impl_hardware_thread_id() noexcept;
+
+  static int impl_get_current_max_threads() noexcept;
+
+  Impl::OpenMPInternal* impl_internal_space_instance() const {
+#ifdef KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
+    return m_space_instance;
+#else
+    return m_space_instance.get();
+#endif
+  }
+
+  static constexpr const char* name() noexcept { return "OpenMP"; }
+  uint32_t impl_instance_id() const noexcept { return 1; }
+
+ private:
+#ifdef KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
+  Impl::OpenMPInternal* m_space_instance;
+#else
+  Kokkos::Impl::HostSharedPtr<Impl::OpenMPInternal> m_space_instance;
+#endif
+};
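+
+// Editorial sketch, not part of Kokkos: host code usually selects this
+// execution space through a policy.  Below, n, a, x, and y are assumed to
+// exist; the label feeds the profiling hooks.
+//
+//   Kokkos::OpenMP exec;  // default instance
+//   Kokkos::parallel_for(
+//       "axpy", Kokkos::RangePolicy<Kokkos::OpenMP>(exec, 0, n),
+//       KOKKOS_LAMBDA(const int i) { y(i) += a * x(i); });
+//   exec.fence("wait for axpy");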
+
+namespace Tools {
+namespace Experimental {
+template <>
+struct DeviceTypeTraits<OpenMP> {
+  static constexpr DeviceType id = DeviceType::OpenMP;
+  static int device_id(const OpenMP&) { return 0; }
+};
+}  // namespace Experimental
+}  // namespace Tools
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+struct MemorySpaceAccess<Kokkos::OpenMP::memory_space,
+                         Kokkos::OpenMP::scratch_memory_space> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = false };
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+#include <OpenMP/Kokkos_OpenMP_Instance.hpp>
+#include <OpenMP/Kokkos_OpenMP_Team.hpp>
+#include <OpenMP/Kokkos_OpenMP_Parallel.hpp>
+#include <OpenMP/Kokkos_OpenMP_Task.hpp>
+
+#include <KokkosExp_MDRangePolicy.hpp>
+/*--------------------------------------------------------------------------*/
+
+#endif /* #if defined( KOKKOS_ENABLE_OPENMP ) */
+#endif /* #ifndef KOKKOS_OPENMP_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_OpenMPTarget.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_OpenMPTarget.hpp
new file mode 100644 (file)
index 0000000..637b4c0
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_OPENMPTARGET_HPP
+#define KOKKOS_OPENMPTARGET_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+
+#if defined(KOKKOS_ENABLE_OPENMPTARGET) && defined(_OPENMP)
+
+#include <omp.h>
+
+#include <cstddef>
+#include <iosfwd>
+#include <Kokkos_OpenMPTargetSpace.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <Kokkos_Parallel.hpp>
+#include <Kokkos_TaskScheduler.hpp>
+#include <Kokkos_Layout.hpp>
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <impl/Kokkos_InitializationSettings.hpp>
+#include <KokkosExp_MDRangePolicy.hpp>
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+class OpenMPTargetInternal;
+}
+
+/// \class OpenMPTarget
+/// \brief Kokkos execution space that offloads work to a device via OpenMP
+/// target directives.
+class OpenMPTarget {
+ public:
+  //------------------------------------
+  //! \name Type declarations that all Kokkos devices must provide.
+  //@{
+
+  //! Tag this class as a kokkos execution space
+  using execution_space = OpenMPTarget;
+  using memory_space    = OpenMPTargetSpace;
+  //! This execution space's preferred device_type
+  using device_type = Kokkos::Device<execution_space, memory_space>;
+
+  using array_layout = LayoutLeft;
+  using size_type    = memory_space::size_type;
+
+  using scratch_memory_space = ScratchMemorySpace<OpenMPTarget>;
+
+  inline static bool in_parallel() { return omp_in_parallel(); }
+
+  static void fence(const std::string& name =
+                        "Kokkos::OpenMPTarget::fence: Unnamed Instance Fence");
+
+  static void impl_static_fence(const std::string& name);
+
+  /** \brief  Return the maximum amount of concurrency.  */
+  static int concurrency();
+
+  //! Print configuration information to the given output stream.
+  void print_configuration(std::ostream& os, bool verbose = false) const;
+
+  static const char* name();
+
+  //! Free any resources being consumed by the device.
+  static void impl_finalize();
+
+  //! Whether the execution space has been initialized
+  static int impl_is_initialized();
+
+  //! Initialize, telling the OpenMP run-time library which device to use.
+  static void impl_initialize(InitializationSettings const&);
+
+  inline Impl::OpenMPTargetInternal* impl_internal_space_instance() const {
+    return m_space_instance;
+  }
+
+  OpenMPTarget();
+  uint32_t impl_instance_id() const noexcept;
+
+ private:
+  Impl::OpenMPTargetInternal* m_space_instance;
+};
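+
+// Editorial sketch, not part of Kokkos: a view allocated in this space is
+// filled on the target device through a policy templated on OpenMPTarget
+// (n is assumed to exist):
+//
+//   using ExecSpace = Kokkos::Experimental::OpenMPTarget;
+//   Kokkos::View<double*, ExecSpace::memory_space> v("v", n);
+//   Kokkos::parallel_for(
+//       "fill", Kokkos::RangePolicy<ExecSpace>(0, n),
+//       KOKKOS_LAMBDA(const int i) { v(i) = 1.0; });
+//   Kokkos::fence();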
+}  // namespace Experimental
+
+namespace Impl {
+template <>
+struct MemorySpaceAccess<
+    Kokkos::Experimental::OpenMPTargetSpace,
+    Kokkos::Experimental::OpenMPTarget::scratch_memory_space> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = false };
+};
+}  // namespace Impl
+
+namespace Tools {
+namespace Experimental {
+template <>
+struct DeviceTypeTraits<::Kokkos::Experimental::OpenMPTarget> {
+  static constexpr DeviceType id =
+      ::Kokkos::Profiling::Experimental::DeviceType::OpenMPTarget;
+  static int device_id(const Kokkos::Experimental::OpenMPTarget&) {
+    return omp_get_default_device();
+  }
+};
+}  // namespace Experimental
+}  // namespace Tools
+
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Exec.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Parallel.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Parallel_MDRange.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Task.hpp>
+
+/*--------------------------------------------------------------------------*/
+
+#endif /* #if defined( KOKKOS_ENABLE_OPENMPTARGET ) && defined( _OPENMP ) */
+#endif /* #ifndef KOKKOS_OPENMPTARGET_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_OpenMPTargetSpace.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_OpenMPTargetSpace.hpp
new file mode 100644 (file)
index 0000000..b489744
--- /dev/null
@@ -0,0 +1,331 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_OPENMPTARGETSPACE_HPP
+#define KOKKOS_OPENMPTARGETSPACE_HPP
+
+#include <cstring>
+#include <string>
+#include <iosfwd>
+#include <typeinfo>
+
+#include <Kokkos_Core_fwd.hpp>
+
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Error.hpp>
+#include <Kokkos_HostSpace.hpp>
+#include <omp.h>
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+/// \brief Initialize lock array for arbitrary size atomics.
+///
+/// Arbitrary atomics are implemented using a hash table of locks
+/// where the hash value is derived from the address of the
+/// object for which an atomic operation is performed.
+/// This function initializes the locks to zero (unset).
+// void init_lock_array_host_space();
+
+/// \brief Acquire a lock for the address
+///
+/// This function tries to acquire the lock for the hash value derived
+/// from the provided ptr. If the lock is successfully acquired the
+/// function returns true. Otherwise it returns false.
+// bool lock_address_host_space(void* ptr);
+
+/// \brief Release lock for the address
+///
+/// This function releases the lock for the hash value derived
+/// from the provided ptr. This function should only be called
+/// after previously successfully acquiring a lock with
+/// lock_address.
+// void unlock_address_host_space(void* ptr);
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace,
+                         Kokkos::Experimental::OpenMPTargetSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
+                         Kokkos::HostSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
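+
+// Editorial sketch, not part of Kokkos internals: the public counterpart of
+// these Impl traits is Kokkos::SpaceAccessibility.  User code can check at
+// compile time that target memory is not directly dereferenceable from the
+// host:
+//
+//   static_assert(!Kokkos::SpaceAccessibility<
+//                     Kokkos::DefaultHostExecutionSpace,
+//                     Kokkos::Experimental::OpenMPTargetSpace>::accessible,
+//                 "target memory requires deep_copy from the host");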
+
+//----------------------------------------
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Experimental {
+
+/// \class OpenMPTargetSpace
+/// \brief Memory management for OpenMP target device memory.
+///
+/// OpenMPTargetSpace is a memory space that governs memory on the OpenMP
+/// target device.  This memory is not directly accessible from the host;
+/// see the MemorySpaceAccess specializations above.
+class OpenMPTargetSpace {
+ public:
+  //! Tag this class as a kokkos memory space
+  using memory_space = OpenMPTargetSpace;
+  using size_type    = unsigned;
+
+  /// \typedef execution_space
+  /// \brief Default execution space for this memory space.
+  ///
+  /// Every memory space has a default execution space.  This is
+  /// useful for things like initializing a View (which happens in
+  /// parallel using the View's default execution space).
+  using execution_space = Kokkos::Experimental::OpenMPTarget;
+
+  //! This memory space's preferred device_type
+  using device_type = Kokkos::Device<execution_space, memory_space>;
+
+  /*--------------------------------*/
+
+  /**\brief  Default memory space instance */
+  OpenMPTargetSpace();
+  OpenMPTargetSpace(OpenMPTargetSpace&& rhs)      = default;
+  OpenMPTargetSpace(const OpenMPTargetSpace& rhs) = default;
+  OpenMPTargetSpace& operator=(OpenMPTargetSpace&&) = default;
+  OpenMPTargetSpace& operator=(const OpenMPTargetSpace&) = default;
+  ~OpenMPTargetSpace()                                   = default;
+
+  /**\brief  Allocate untracked memory in the space */
+  void* allocate(const size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  /**\brief  Deallocate untracked memory in the space */
+  void deallocate(void* const arg_alloc_ptr,
+                  const std::size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
+
+  static constexpr const char* name() { return "OpenMPTargetSpace"; }
+
+ private:
+  void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      const Kokkos::Tools::SpaceHandle =
+                          Kokkos::Tools::make_space_handle(name())) const;
+  void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                       const size_t arg_alloc_size,
+                       const size_t arg_logical_size = 0,
+                       const Kokkos::Tools::SpaceHandle =
+                           Kokkos::Tools::make_space_handle(name())) const;
+
+  friend class Kokkos::Impl::SharedAllocationRecord<
+      Kokkos::Experimental::OpenMPTargetSpace, void>;
+};
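+
+// Editorial sketch, not part of Kokkos: allocate()/deallocate() manage raw,
+// untracked device memory; most code should use Kokkos::View instead.
+// nbytes is assumed to exist:
+//
+//   Kokkos::Experimental::OpenMPTargetSpace space;
+//   void* ptr = space.allocate("scratch", nbytes);
+//   // ... use ptr inside target regions ...
+//   space.deallocate("scratch", ptr, nbytes);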
+}  // namespace Experimental
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+class SharedAllocationRecord<Kokkos::Experimental::OpenMPTargetSpace, void>
+    : public HostInaccessibleSharedAllocationRecordCommon<
+          Kokkos::Experimental::OpenMPTargetSpace> {
+ private:
+  friend class HostInaccessibleSharedAllocationRecordCommon<
+      Kokkos::Experimental::OpenMPTargetSpace>;
+  friend class SharedAllocationRecordCommon<
+      Kokkos::Experimental::OpenMPTargetSpace>;
+  friend Kokkos::Experimental::OpenMPTargetSpace;
+
+  using base_t = HostInaccessibleSharedAllocationRecordCommon<
+      Kokkos::Experimental::OpenMPTargetSpace>;
+  using RecordBase = SharedAllocationRecord<void, void>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+  /**\brief  Root record for tracked allocations from this OpenMPTargetSpace
+   * instance */
+  static RecordBase s_root_record;
+
+  const Kokkos::Experimental::OpenMPTargetSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord();
+  SharedAllocationRecord() = default;
+
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /*exec_space*/,
+      const Kokkos::Experimental::OpenMPTargetSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &deallocate)
+      : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
+                               arg_dealloc) {}
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::OpenMPTargetSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &deallocate);
+
+ public:
+  KOKKOS_INLINE_FUNCTION static SharedAllocationRecord* allocate(
+      const Kokkos::Experimental::OpenMPTargetSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc) {
+    KOKKOS_IF_ON_HOST(
+        (return new SharedAllocationRecord(arg_space, arg_label, arg_alloc);))
+    KOKKOS_IF_ON_DEVICE(
+        ((void)arg_space; (void)arg_label; (void)arg_alloc; return nullptr;))
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+// TODO: implement all possible deep_copies
+template <class ExecutionSpace>
+struct DeepCopy<Kokkos::Experimental::OpenMPTargetSpace,
+                Kokkos::Experimental::OpenMPTargetSpace, ExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    // In Release and RelWithDebInfo builds, the size of the memcpy must be
+    // greater than zero to avoid a runtime error.  omp_target_memcpy returns
+    // zero on success.
+    if (n > 0)
+      OMPT_SAFE_CALL(omp_target_memcpy(dst, const_cast<void*>(src), n, 0, 0,
+                                       omp_get_default_device(),
+                                       omp_get_default_device()));
+  }
+  DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+    exec.fence(
+        "Kokkos::Impl::DeepCopy<OpenMPTargetSpace, OpenMPTargetSpace>: fence "
+        "before "
+        "copy");
+    if (n > 0)
+      OMPT_SAFE_CALL(omp_target_memcpy(dst, const_cast<void*>(src), n, 0, 0,
+                                       omp_get_default_device(),
+                                       omp_get_default_device()));
+  }
+};
+
+template <class ExecutionSpace>
+struct DeepCopy<Kokkos::Experimental::OpenMPTargetSpace, HostSpace,
+                ExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    if (n > 0)
+      OMPT_SAFE_CALL(omp_target_memcpy(dst, const_cast<void*>(src), n, 0, 0,
+                                       omp_get_default_device(),
+                                       omp_get_initial_device()));
+  }
+  DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+    exec.fence(
+        "Kokkos::Impl::DeepCopy<OpenMPTargetSpace, HostSpace>: fence before "
+        "copy");
+    if (n > 0)
+      OMPT_SAFE_CALL(omp_target_memcpy(dst, const_cast<void*>(src), n, 0, 0,
+                                       omp_get_default_device(),
+                                       omp_get_initial_device()));
+  }
+};
+
+template <class ExecutionSpace>
+struct DeepCopy<HostSpace, Kokkos::Experimental::OpenMPTargetSpace,
+                ExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    if (n > 0)
+      OMPT_SAFE_CALL(omp_target_memcpy(dst, const_cast<void*>(src), n, 0, 0,
+                                       omp_get_initial_device(),
+                                       omp_get_default_device()));
+  }
+  DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+    exec.fence(
+        "Kokkos::Impl::DeepCopy<HostSpace, OpenMPTargetSpace>: fence before "
+        "copy");
+    if (n > 0)
+      OMPT_SAFE_CALL(omp_target_memcpy(dst, const_cast<void*>(src), n, 0, 0,
+                                       omp_get_initial_device(),
+                                       omp_get_default_device()));
+  }
+};
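+
+// Editorial sketch, not part of Kokkos: user code does not call these Impl
+// specializations directly; they back Kokkos::deep_copy between mirrored
+// views (n is assumed to exist):
+//
+//   Kokkos::View<double*, Kokkos::Experimental::OpenMPTargetSpace>
+//       dev("dev", n);
+//   auto host = Kokkos::create_mirror_view(dev);  // lives in HostSpace
+//   Kokkos::deep_copy(dev, host);  // HostSpace -> OpenMPTargetSpace
+//   Kokkos::deep_copy(host, dev);  // OpenMPTargetSpace -> HostSpace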
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
+#endif /* #ifndef KOKKOS_OPENMPTARGETSPACE_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Pair.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Pair.hpp
new file mode 100644 (file)
index 0000000..7e5b7ce
--- /dev/null
@@ -0,0 +1,526 @@
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+
+/// \file Kokkos_Pair.hpp
+/// \brief Declaration and definition of Kokkos::pair.
+///
+/// This header file declares and defines Kokkos::pair and its related
+/// nonmember functions.
+
+#ifndef KOKKOS_PAIR_HPP
+#define KOKKOS_PAIR_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PAIR
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <utility>
+
+namespace Kokkos {
+/// \struct pair
+/// \brief Replacement for std::pair that works on CUDA devices.
+///
+/// The instance methods of std::pair, including its constructors, are
+/// not marked as <tt>__device__</tt> functions.  Thus, they cannot be
+/// called on a CUDA device, such as an NVIDIA GPU.  This struct
+/// implements the same interface as std::pair, but can be used on a
+/// CUDA device as well as on the host.
+template <class T1, class T2>
+struct pair {
+  //! The first template parameter of this class.
+  using first_type = T1;
+  //! The second template parameter of this class.
+  using second_type = T2;
+
+  //! The first element of the pair.
+  first_type first;
+  //! The second element of the pair.
+  second_type second;
+
+  /// \brief Default constructor.
+  ///
+  /// This calls the default constructors of T1 and T2.  It won't
+  /// compile if those default constructors are not defined and
+  /// public.
+  KOKKOS_DEFAULTED_FUNCTION constexpr pair() = default;
+
+  /// \brief Constructor that takes both elements of the pair.
+  ///
+  /// This calls the copy constructors of T1 and T2.  It won't compile
+  /// if those copy constructors are not defined and public.
+#ifdef KOKKOS_COMPILER_NVHPC  // FIXME_NVHPC bug in NVHPC regarding constexpr
+                              // constructors used in device code
+  KOKKOS_FORCEINLINE_FUNCTION
+#else
+  KOKKOS_FORCEINLINE_FUNCTION constexpr
+#endif
+  pair(first_type const& f, second_type const& s) : first(f), second(s) {}
+
+  /// \brief Copy constructor.
+  ///
+  /// This calls the copy constructors of T1 and T2.  It won't compile
+  /// if those copy constructors are not defined and public.
+  template <class U, class V>
+#ifdef KOKKOS_COMPILER_NVHPC  // FIXME_NVHPC bug in NVHPC regarding constexpr
+                              // constructors used in device code
+  KOKKOS_FORCEINLINE_FUNCTION
+#else
+  KOKKOS_FORCEINLINE_FUNCTION constexpr
+#endif
+  pair(const pair<U, V>& p)
+      : first(p.first), second(p.second) {
+  }
+
+  /// \brief Copy constructor.
+  ///
+  /// This calls the copy constructors of T1 and T2.  It won't compile
+  /// if those copy constructors are not defined and public.
+  template <class U, class V>
+  KOKKOS_FORCEINLINE_FUNCTION constexpr pair(const volatile pair<U, V>& p)
+      : first(p.first), second(p.second) {}
+
+  /// \brief Assignment operator.
+  ///
+  /// This calls the assignment operators of T1 and T2.  It won't
+  /// compile if the assignment operators are not defined and public.
+  template <class U, class V>
+  KOKKOS_FORCEINLINE_FUNCTION pair<T1, T2>& operator=(const pair<U, V>& p) {
+    first  = p.first;
+    second = p.second;
+    return *this;
+  }
+
+  /// \brief Assignment operator, for volatile <tt>*this</tt>.
+  ///
+  /// \param p [in] Input; right-hand side of the assignment.
+  ///
+  /// This calls the assignment operators of T1 and T2.  It will not
+  /// compile if the assignment operators are not defined and public.
+  ///
+  /// This operator returns \c void instead of <tt>volatile pair<T1,
+  /// T2>& </tt>.  See Kokkos Issue #177 for the explanation.  In
+  /// practice, this means that you should not chain assignments with
+  /// volatile lvalues.
+  template <class U, class V>
+  KOKKOS_FORCEINLINE_FUNCTION void operator=(
+      const volatile pair<U, V>& p) volatile {
+    first  = p.first;
+    second = p.second;
+    // We deliberately do not return anything here.  See explanation
+    // in public documentation above.
+  }
+
+  // from std::pair<U,V>
+  template <class U, class V>
+  pair(const std::pair<U, V>& p) : first(p.first), second(p.second) {}
+
+  /// \brief Return the std::pair version of this object.
+  ///
+  /// This is <i>not</i> a device function; you may not call it on a
+  /// CUDA device.  It is meant to be called on the host, if the user
+  /// wants an std::pair instead of a Kokkos::pair.
+  ///
+  /// \note This is not a conversion operator, since defining a
+  ///   conversion operator made the relational operators have
+  ///   ambiguous definitions.
+  std::pair<T1, T2> to_std_pair() const {
+    return std::make_pair(first, second);
+  }
+};
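+
+// Editorial sketch, not part of Kokkos: unlike std::pair, this type can be
+// constructed and accessed inside device kernels (n and out are assumed to
+// exist):
+//
+//   Kokkos::parallel_for(
+//       "use_pair", n, KOKKOS_LAMBDA(const int i) {
+//         Kokkos::pair<int, double> p = Kokkos::make_pair(i, 2.0 * i);
+//         out(i) = p.first + p.second;
+//       });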
+
+template <class T1, class T2>
+struct pair<T1&, T2&> {
+  //! The first template parameter of this class.
+  using first_type = T1&;
+  //! The second template parameter of this class.
+  using second_type = T2&;
+
+  //! The first element of the pair.
+  first_type first;
+  //! The second element of the pair.
+  second_type second;
+
+  /// \brief Constructor that takes both elements of the pair.
+  ///
+  /// This calls the copy constructors of T1 and T2.  It won't compile
+  /// if those copy constructors are not defined and public.
+  KOKKOS_FORCEINLINE_FUNCTION constexpr pair(first_type f, second_type s)
+      : first(f), second(s) {}
+
+  /// \brief Copy constructor.
+  ///
+  /// This calls the copy constructors of T1 and T2.  It won't compile
+  /// if those copy constructors are not defined and public.
+  template <class U, class V>
+  KOKKOS_FORCEINLINE_FUNCTION constexpr pair(const pair<U, V>& p)
+      : first(p.first), second(p.second) {}
+
+  // from std::pair<U,V>
+  template <class U, class V>
+  pair(const std::pair<U, V>& p) : first(p.first), second(p.second) {}
+
+  /// \brief Assignment operator.
+  ///
+  /// This calls the assignment operators of T1 and T2.  It won't
+  /// compile if the assignment operators are not defined and public.
+  template <class U, class V>
+  KOKKOS_FORCEINLINE_FUNCTION pair<first_type, second_type>& operator=(
+      const pair<U, V>& p) {
+    first  = p.first;
+    second = p.second;
+    return *this;
+  }
+
+  /// \brief Return the std::pair version of this object.
+  ///
+  /// This is <i>not</i> a device function; you may not call it on a
+  /// CUDA device.  It is meant to be called on the host, if the user
+  /// wants an std::pair instead of a Kokkos::pair.
+  ///
+  /// \note This is not a conversion operator, since defining a
+  ///   conversion operator made the relational operators have
+  ///   ambiguous definitions.
+  std::pair<T1, T2> to_std_pair() const {
+    return std::make_pair(first, second);
+  }
+};
+
+template <class T1, class T2>
+struct pair<T1, T2&> {
+  //! The first template parameter of this class.
+  using first_type = T1;
+  //! The second template parameter of this class.
+  using second_type = T2&;
+
+  //! The first element of the pair.
+  first_type first;
+  //! The second element of the pair.
+  second_type second;
+
+  /// \brief Constructor that takes both elements of the pair.
+  ///
+  /// This calls the copy constructors of T1 and T2.  It won't compile
+  /// if those copy constructors are not defined and public.
+  KOKKOS_FORCEINLINE_FUNCTION constexpr pair(first_type const& f, second_type s)
+      : first(f), second(s) {}
+
+  /// \brief Copy constructor.
+  ///
+  /// This calls the copy constructors of T1 and T2.  It won't compile
+  /// if those copy constructors are not defined and public.
+  template <class U, class V>
+  KOKKOS_FORCEINLINE_FUNCTION constexpr pair(const pair<U, V>& p)
+      : first(p.first), second(p.second) {}
+
+  // from std::pair<U,V>
+  template <class U, class V>
+  pair(const std::pair<U, V>& p) : first(p.first), second(p.second) {}
+
+  /// \brief Assignment operator.
+  ///
+  /// This calls the assignment operators of T1 and T2.  It won't
+  /// compile if the assignment operators are not defined and public.
+  template <class U, class V>
+  KOKKOS_FORCEINLINE_FUNCTION pair<first_type, second_type>& operator=(
+      const pair<U, V>& p) {
+    first  = p.first;
+    second = p.second;
+    return *this;
+  }
+
+  /// \brief Return the std::pair version of this object.
+  ///
+  /// This is <i>not</i> a device function; you may not call it on a
+  /// CUDA device.  It is meant to be called on the host, if the user
+  /// wants an std::pair instead of a Kokkos::pair.
+  ///
+  /// \note This is not a conversion operator, since defining a
+  ///   conversion operator made the relational operators have
+  ///   ambiguous definitions.
+  std::pair<T1, T2> to_std_pair() const {
+    return std::make_pair(first, second);
+  }
+};
+
+template <class T1, class T2>
+struct pair<T1&, T2> {
+  //! The first template parameter of this class.
+  using first_type = T1&;
+  //! The second template parameter of this class.
+  using second_type = T2;
+
+  //! The first element of the pair.
+  first_type first;
+  //! The second element of the pair.
+  second_type second;
+
+  /// \brief Constructor that takes both elements of the pair.
+  ///
+  /// This calls the copy constructors of T1 and T2.  It won't compile
+  /// if those copy constructors are not defined and public.
+  KOKKOS_FORCEINLINE_FUNCTION constexpr pair(first_type f, second_type const& s)
+      : first(f), second(s) {}
+
+  /// \brief Copy constructor.
+  ///
+  /// This calls the copy constructors of T1 and T2.  It won't compile
+  /// if those copy constructors are not defined and public.
+  template <class U, class V>
+  KOKKOS_FORCEINLINE_FUNCTION constexpr pair(const pair<U, V>& p)
+      : first(p.first), second(p.second) {}
+
+  // from std::pair<U,V>
+  template <class U, class V>
+  pair(const std::pair<U, V>& p) : first(p.first), second(p.second) {}
+
+  /// \brief Assignment operator.
+  ///
+  /// This calls the assignment operators of T1 and T2.  It won't
+  /// compile if the assignment operators are not defined and public.
+  template <class U, class V>
+  KOKKOS_FORCEINLINE_FUNCTION pair<first_type, second_type>& operator=(
+      const pair<U, V>& p) {
+    first  = p.first;
+    second = p.second;
+    return *this;
+  }
+
+  /// \brief Return the std::pair version of this object.
+  ///
+  /// This is <i>not</i> a device function; you may not call it on a
+  /// CUDA device.  It is meant to be called on the host, if the user
+  /// wants an std::pair instead of a Kokkos::pair.
+  ///
+  /// \note This is not a conversion operator, since defining a
+  ///   conversion operator made the relational operators have
+  ///   ambiguous definitions.
+  std::pair<T1, T2> to_std_pair() const {
+    return std::make_pair(first, second);
+  }
+};
+
+//! Equality operator for Kokkos::pair.
+template <class T1, class T2>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator==(const pair<T1, T2>& lhs,
+                                                      const pair<T1, T2>& rhs) {
+  return lhs.first == rhs.first && lhs.second == rhs.second;
+}
+
+//! Inequality operator for Kokkos::pair.
+template <class T1, class T2>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator!=(const pair<T1, T2>& lhs,
+                                                      const pair<T1, T2>& rhs) {
+  return !(lhs == rhs);
+}
+
+//! Less-than operator for Kokkos::pair.
+template <class T1, class T2>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator<(const pair<T1, T2>& lhs,
+                                                     const pair<T1, T2>& rhs) {
+  return lhs.first < rhs.first ||
+         (!(rhs.first < lhs.first) && lhs.second < rhs.second);
+}
+
+//! Less-than-or-equal-to operator for Kokkos::pair.
+template <class T1, class T2>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator<=(const pair<T1, T2>& lhs,
+                                                      const pair<T1, T2>& rhs) {
+  return !(rhs < lhs);
+}
+
+//! Greater-than operator for Kokkos::pair.
+template <class T1, class T2>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator>(const pair<T1, T2>& lhs,
+                                                     const pair<T1, T2>& rhs) {
+  return rhs < lhs;
+}
+
+//! Greater-than-or-equal-to operator for Kokkos::pair.
+template <class T1, class T2>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator>=(const pair<T1, T2>& lhs,
+                                                      const pair<T1, T2>& rhs) {
+  return !(lhs < rhs);
+}
+
+/// \brief Return a new pair.
+///
+/// This is a "nonmember constructor" for Kokkos::pair.  It works just
+/// like std::make_pair.
+template <class T1, class T2>
+KOKKOS_FORCEINLINE_FUNCTION constexpr pair<T1, T2> make_pair(T1 x, T2 y) {
+  return (pair<T1, T2>(x, y));
+}
+
+/// \brief Return a pair of references to the input arguments.
+///
+/// This is analogous to std::tie (new in C++11).  You can use it to
+/// assign to two variables at once, from the result of a function
+/// that returns a pair.  For example (<tt>__device__</tt> and
+/// <tt>__host__</tt> attributes omitted for brevity):
+/// \code
+/// // Declaration of the function to call.
+/// // First return value: operation count.
+/// // Second return value: whether all operations succeeded.
+/// Kokkos::pair<int, bool> someFunction ();
+///
+/// // Code that uses Kokkos::tie.
+/// int myFunction () {
+///   int count = 0;
+///   bool success = false;
+///
+///   // This assigns to both count and success.
+///   Kokkos::tie (count, success) = someFunction ();
+///
+///   if (! success) {
+///     // ... Some operation failed;
+///     //     take corrective action ...
+///   }
+///   return count;
+/// }
+/// \endcode
+///
+/// The line that uses tie() could have been written like this:
+/// \code
+///   Kokkos::pair<int, bool> result = someFunction ();
+///   count = result.first;
+///   success = result.second;
+/// \endcode
+///
+/// Using tie() saves two lines of code and avoids a copy of each
+/// element of the pair.  The latter could be significant if one or
+/// both elements of the pair are more substantial objects than \c int
+/// or \c bool.
+template <class T1, class T2>
+KOKKOS_FORCEINLINE_FUNCTION pair<T1&, T2&> tie(T1& x, T2& y) {
+  return (pair<T1&, T2&>(x, y));
+}
+
+//
+// Specialization of Kokkos::pair for a \c void second argument.  This
+// is not actually a "pair"; it only contains one element, the first.
+//
+template <class T1>
+struct pair<T1, void> {
+  using first_type  = T1;
+  using second_type = void;
+
+  first_type first;
+  enum { second = 0 };
+
+  KOKKOS_DEFAULTED_FUNCTION constexpr pair() = default;
+
+  KOKKOS_FORCEINLINE_FUNCTION constexpr pair(const first_type& f) : first(f) {}
+
+  KOKKOS_FORCEINLINE_FUNCTION constexpr pair(const first_type& f, int)
+      : first(f) {}
+
+  template <class U>
+  KOKKOS_FORCEINLINE_FUNCTION constexpr pair(const pair<U, void>& p)
+      : first(p.first) {}
+
+  template <class U>
+  KOKKOS_FORCEINLINE_FUNCTION pair<T1, void>& operator=(
+      const pair<U, void>& p) {
+    first = p.first;
+    return *this;
+  }
+};
+
+//
+// Specialization of relational operators for Kokkos::pair<T1,void>.
+//
+
+template <class T1>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator==(
+    const pair<T1, void>& lhs, const pair<T1, void>& rhs) {
+  return lhs.first == rhs.first;
+}
+
+template <class T1>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator!=(
+    const pair<T1, void>& lhs, const pair<T1, void>& rhs) {
+  return !(lhs == rhs);
+}
+
+template <class T1>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator<(
+    const pair<T1, void>& lhs, const pair<T1, void>& rhs) {
+  return lhs.first < rhs.first;
+}
+
+template <class T1>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator<=(
+    const pair<T1, void>& lhs, const pair<T1, void>& rhs) {
+  return !(rhs < lhs);
+}
+
+template <class T1>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator>(
+    const pair<T1, void>& lhs, const pair<T1, void>& rhs) {
+  return rhs < lhs;
+}
+
+template <class T1>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool operator>=(
+    const pair<T1, void>& lhs, const pair<T1, void>& rhs) {
+  return !(lhs < rhs);
+}
+
+namespace Impl {
+
+template <class T>
+struct is_pair_like : std::false_type {};
+template <class T, class U>
+struct is_pair_like<Kokkos::pair<T, U>> : std::true_type {};
+template <class T, class U>
+struct is_pair_like<std::pair<T, U>> : std::true_type {};
+
+}  // end namespace Impl
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PAIR
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PAIR
+#endif
+#endif  // KOKKOS_PAIR_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Parallel.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Parallel.hpp
new file mode 100644 (file)
index 0000000..2b5e39d
--- /dev/null
@@ -0,0 +1,586 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/// \file Kokkos_Parallel.hpp
+/// \brief Declaration of parallel operators
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_PARALLEL_HPP
+#define KOKKOS_PARALLEL_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_DetectionIdiom.hpp>
+#include <Kokkos_ExecPolicy.hpp>
+#include <Kokkos_View.hpp>
+
+#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_Tools_Generic.hpp>
+
+#include <impl/Kokkos_Traits.hpp>
+#include <impl/Kokkos_FunctorAnalysis.hpp>
+
+#include <cstddef>
+#include <type_traits>
+#include <typeinfo>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class T>
+using execution_space_t = typename T::execution_space;
+
+template <class T>
+using device_type_t = typename T::device_type;
+
+//----------------------------------------------------------------------------
+/** \brief  Given a Functor and Execution Policy, query an execution space.
+ *
+ *  if       the Policy has an execution space use that
+ *  else if  the Functor has an execution_space use that
+ *  else if  the Functor has a device_type use that for backward compatibility
+ *  else     use the default
+ */
+
+template <class Functor, class Policy>
+struct FunctorPolicyExecutionSpace {
+  using policy_execution_space  = detected_t<execution_space_t, Policy>;
+  using functor_execution_space = detected_t<execution_space_t, Functor>;
+  using functor_device_type     = detected_t<device_type_t, Functor>;
+  using functor_device_type_execution_space =
+      detected_t<execution_space_t, functor_device_type>;
+
+  static_assert(
+      !is_detected<execution_space_t, Policy>::value ||
+          !is_detected<execution_space_t, Functor>::value ||
+          std::is_same<policy_execution_space, functor_execution_space>::value,
+      "A policy with an execution space and a functor with an execution space "
+      "are given but the execution space types do not match!");
+  static_assert(!is_detected<execution_space_t, Policy>::value ||
+                    !is_detected<device_type_t, Functor>::value ||
+                    std::is_same<policy_execution_space,
+                                 functor_device_type_execution_space>::value,
+                "A policy with an execution space and a functor with a device "
+                "type are given but the execution space types do not match!");
+  static_assert(!is_detected<device_type_t, Functor>::value ||
+                    !is_detected<execution_space_t, Functor>::value ||
+                    std::is_same<functor_device_type_execution_space,
+                                 functor_execution_space>::value,
+                "A functor with both an execution space and device type is "
+                "given but their execution space types do not match!");
+
+  using execution_space = detected_or_t<
+      detected_or_t<
+          std::conditional_t<
+              is_detected<device_type_t, Functor>::value,
+              detected_t<execution_space_t, detected_t<device_type_t, Functor>>,
+              Kokkos::DefaultExecutionSpace>,
+          execution_space_t, Functor>,
+      execution_space_t, Policy>;
+};
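+
+// Editor's sketch (not part of the original header), illustrating the
+// precedence rules documented above; `MyFunctor` is a hypothetical name.
+//
+//   struct MyFunctor {
+//     using device_type = Kokkos::Device<
+//         Kokkos::DefaultExecutionSpace,
+//         Kokkos::DefaultExecutionSpace::memory_space>;
+//     KOKKOS_INLINE_FUNCTION void operator()(const int) const {}
+//   };
+//   // With a policy that carries no execution space, the functor's
+//   // device_type wins, so
+//   // FunctorPolicyExecutionSpace<MyFunctor, void>::execution_space is
+//   // Kokkos::DefaultExecutionSpace. If neither the policy nor the functor
+//   // provides one, the default execution space is selected.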
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+/** \brief Execute \c functor in parallel according to the execution \c policy.
+ *
+ * A "functor" is a class containing the function to execute in parallel,
+ * data needed for that execution, and an optional \c execution_space
+ * alias.  Here is an example functor for parallel_for:
+ *
+ * \code
+ *  class FunctorType {
+ *  public:
+ *    using execution_space = ...;
+ *    void operator() ( WorkType iwork ) const ;
+ *  };
+ * \endcode
+ *
+ * In the above example, \c WorkType is any integer type for which a
+ * valid conversion from \c size_t to \c WorkType exists.  Its
+ * <tt>operator()</tt> method defines the operation to parallelize,
+ * over the range of integer indices <tt>iwork=[0,work_count-1]</tt>.
+ * This corresponds to a single iteration \c iwork of a \c for loop.
+ * If \c execution_space is not defined, \c DefaultExecutionSpace will be used.
+ */
+template <
+    class ExecPolicy, class FunctorType,
+    class Enable = std::enable_if_t<is_execution_policy<ExecPolicy>::value>>
+inline void parallel_for(const std::string& str, const ExecPolicy& policy,
+                         const FunctorType& functor) {
+  uint64_t kpID = 0;
+
+  ExecPolicy inner_policy = policy;
+  Kokkos::Tools::Impl::begin_parallel_for(inner_policy, functor, str, kpID);
+
+  Kokkos::Impl::shared_allocation_tracking_disable();
+  Impl::ParallelFor<FunctorType, ExecPolicy> closure(functor, inner_policy);
+  Kokkos::Impl::shared_allocation_tracking_enable();
+
+  closure.execute();
+
+  Kokkos::Tools::Impl::end_parallel_for(inner_policy, functor, str, kpID);
+}
+
+template <class ExecPolicy, class FunctorType>
+inline void parallel_for(
+    const ExecPolicy& policy, const FunctorType& functor,
+    std::enable_if_t<is_execution_policy<ExecPolicy>::value>* = nullptr) {
+  Kokkos::parallel_for("", policy, functor);
+}
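+
+// Editor's usage sketch (illustrative, not part of the original header):
+// calling the labeled overload with a RangePolicy; `n` and `data` are
+// hypothetical.
+//
+//   Kokkos::View<double*> data("data", n);
+//   Kokkos::parallel_for(
+//       "fill", Kokkos::RangePolicy<>(0, n),
+//       KOKKOS_LAMBDA(const int i) { data(i) = 2.0 * i; });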
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+template <class ExecPolicy, class FunctorType>
+KOKKOS_DEPRECATED_WITH_COMMENT(
+    "Use the overload taking the label as first argument instead!")
+inline void parallel_for(
+    const ExecPolicy& policy, const FunctorType& functor,
+    const std::string& str,
+    std::enable_if_t<is_execution_policy<ExecPolicy>::value>* = nullptr) {
+  Kokkos::parallel_for(str, policy, functor);
+}
+#endif
+
+template <class FunctorType>
+inline void parallel_for(const std::string& str, const size_t work_count,
+                         const FunctorType& functor) {
+  using execution_space =
+      typename Impl::FunctorPolicyExecutionSpace<FunctorType,
+                                                 void>::execution_space;
+  using policy = RangePolicy<execution_space>;
+
+  policy execution_policy = policy(0, work_count);
+  ::Kokkos::parallel_for(str, execution_policy, functor);
+}
+
+template <class FunctorType>
+inline void parallel_for(const size_t work_count, const FunctorType& functor) {
+  ::Kokkos::parallel_for("", work_count, functor);
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+template <class FunctorType>
+KOKKOS_DEPRECATED_WITH_COMMENT(
+    "Use the overload taking the label as first argument instead!")
+inline void parallel_for(const size_t work_count, const FunctorType& functor,
+                         const std::string& str) {
+  ::Kokkos::parallel_for(str, work_count, functor);
+}
+#endif
+
+}  // namespace Kokkos
+
+#include <Kokkos_Parallel_Reduce.hpp>
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+/// \fn parallel_scan
+/// \tparam ExecutionPolicy The execution policy type.
+/// \tparam FunctorType     The scan functor type.
+///
+/// \param policy  [in] The execution policy.
+/// \param functor [in] The scan functor.
+///
+/// This function implements a parallel scan pattern.  The scan can
+/// be either inclusive or exclusive, depending on how you implement
+/// the scan functor.
+///
+/// A scan functor looks almost exactly like a reduce functor, except
+/// that its operator() takes a third \c bool argument, \c final_pass,
+/// which indicates whether this is the last pass of the scan
+/// operation.  We will show below how to use the \c final_pass
+/// argument to control whether the scan is inclusive or exclusive.
+///
+/// Here is the minimum required interface of a scan functor for a POD
+/// (plain old data) value type \c PodType.  That is, the result is a
+/// View of zero or more PodType.  It is also possible for the result
+/// to be an array of (same-sized) arrays of PodType, but we do not
+/// show the required interface for that here.
+/// \code
+/// template< class ExecPolicy , class FunctorType >
+/// class ScanFunctor {
+/// public:
+///   // The Kokkos device type
+///   using execution_space = ...;
+///   // Type of an entry of the array containing the result;
+///   // also the type of each of the entries combined using
+///   // operator() or join().
+///   using value_type = PodType;
+///
+///   void operator () (const ExecPolicy::member_type & i,
+///                     value_type& update,
+///                     const bool final_pass) const;
+///   void init (value_type& update) const;
+///   void join (value_type& update,
+///              const value_type& input) const;
+/// };
+/// \endcode
+///
+/// Here is an example of a functor which computes an inclusive plus-scan
+/// of an array of \c int, in place.  If given an array [1, 2, 3, 4], this
+/// scan will overwrite that array with [1, 3, 6, 10].
+///
+/// \code
+/// template<class SpaceType>
+/// class InclScanFunctor {
+/// public:
+///   using execution_space = SpaceType;
+///   using value_type = int;
+///   using size_type = typename SpaceType::size_type;
+///
+///   InclScanFunctor( Kokkos::View<value_type*, execution_space> x
+///                  , Kokkos::View<value_type*, execution_space> y ) : m_x(x),
+///                  m_y(y) {}
+///
+///   void operator () (const size_type i, value_type& update, const bool
+///   final_pass) const {
+///     update += m_x(i);
+///     if (final_pass) {
+///       m_y(i) = update;
+///     }
+///   }
+///   void init (value_type& update) const {
+///     update = 0;
+///   }
+///   void join (value_type& update, const value_type& input)
+///   const {
+///     update += input;
+///   }
+///
+/// private:
+///   Kokkos::View<value_type*, execution_space> m_x;
+///   Kokkos::View<value_type*, execution_space> m_y;
+/// };
+/// \endcode
+///
+/// Here is an example of a functor which computes an <i>exclusive</i>
+/// scan of an array of \c int, in place.  In operator(), note both
+/// that the final_pass test and the update have switched places, and
+/// the use of a temporary.  If given an array [1, 2, 3, 4], this scan
+/// will overwrite that array with [0, 1, 3, 6].
+///
+/// \code
+/// template<class SpaceType>
+/// class ExclScanFunctor {
+/// public:
+///   using execution_space = SpaceType;
+///   using value_type = int;
+///   using size_type = typename SpaceType::size_type;
+///
+///   ExclScanFunctor (Kokkos::View<value_type*, execution_space> x) : x_ (x) {}
+///
+///   void operator () (const size_type i, value_type& update, const bool
+///   final_pass) const {
+///     const value_type x_i = x_(i);
+///     if (final_pass) {
+///       x_(i) = update;
+///     }
+///     update += x_i;
+///   }
+///   void init (value_type& update) const {
+///     update = 0;
+///   }
+///   void join (value_type& update, const value_type& input)
+///   const {
+///     update += input;
+///   }
+///
+/// private:
+///   Kokkos::View<value_type*, execution_space> x_;
+/// };
+/// \endcode
+///
+/// Here is an example of a functor which builds on the above
+/// exclusive scan example, to compute an offsets array from a
+/// population count array, in place.  We assume that the pop count
+/// array has an extra entry at the end to store the final count.  If
+/// given an array [1, 2, 3, 4, 0], this scan will overwrite that
+/// array with [0, 1, 3, 6, 10].
+///
+/// \code
+/// template<class SpaceType>
+/// class OffsetScanFunctor {
+/// public:
+///   using execution_space = SpaceType;
+///   using value_type = int;
+///   using size_type = typename SpaceType::size_type;
+///
+///   // last_index_ is the last valid index (zero-based) of x.
+///   // If x has length zero, then last_index_ won't be used anyway.
+///   OffsetScanFunctor( Kokkos::View<value_type*, execution_space> x
+///                    , Kokkos::View<value_type*, execution_space> y )
+///      : m_x(x), m_y(y), last_index_ (x.extent (0) == 0 ? 0 :
+///      x.extent (0) - 1)
+///   {}
+///
+///   void operator () (const size_type i, int& update, const bool final_pass)
+///   const {
+///     if (final_pass) {
+///       m_y(i) = update;
+///     }
+///     update += m_x(i);
+///     // The last entry of m_y gets the final sum.
+///     if (final_pass && i == last_index_) {
+///       m_y(i+1) = update;
+///     }
+///   }
+///   void init (value_type& update) const {
+///     update = 0;
+///   }
+///   void join (value_type& update, const value_type& input)
+///   const {
+///     update += input;
+///   }
+///
+/// private:
+///   Kokkos::View<value_type*, execution_space> m_x;
+///   Kokkos::View<value_type*, execution_space> m_y;
+///   const size_type last_index_;
+/// };
+/// \endcode
+///
+template <class ExecutionPolicy, class FunctorType,
+          class Enable =
+              std::enable_if_t<is_execution_policy<ExecutionPolicy>::value>>
+inline void parallel_scan(const std::string& str, const ExecutionPolicy& policy,
+                          const FunctorType& functor) {
+  uint64_t kpID                = 0;
+  ExecutionPolicy inner_policy = policy;
+  Kokkos::Tools::Impl::begin_parallel_scan(inner_policy, functor, str, kpID);
+
+  Kokkos::Impl::shared_allocation_tracking_disable();
+  Impl::ParallelScan<FunctorType, ExecutionPolicy> closure(functor,
+                                                           inner_policy);
+  Kokkos::Impl::shared_allocation_tracking_enable();
+
+  closure.execute();
+
+  Kokkos::Tools::Impl::end_parallel_scan(inner_policy, functor, str, kpID);
+}
+
+template <class ExecutionPolicy, class FunctorType>
+inline void parallel_scan(
+    const ExecutionPolicy& policy, const FunctorType& functor,
+    std::enable_if_t<is_execution_policy<ExecutionPolicy>::value>* = nullptr) {
+  ::Kokkos::parallel_scan("", policy, functor);
+}
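+
+// Editor's usage sketch (illustrative): running the InclScanFunctor from the
+// documentation above; `x`, `y`, and `n` are hypothetical views and size.
+//
+//   InclScanFunctor<Kokkos::DefaultExecutionSpace> scanner(x, y);
+//   Kokkos::parallel_scan("incl_scan", Kokkos::RangePolicy<>(0, n), scanner);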
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+template <class ExecutionPolicy, class FunctorType>
+KOKKOS_DEPRECATED_WITH_COMMENT(
+    "Use the overload taking the label as first argument instead!")
+inline void parallel_scan(
+    const ExecutionPolicy& policy, const FunctorType& functor,
+    const std::string& str,
+    std::enable_if_t<is_execution_policy<ExecutionPolicy>::value>* = nullptr) {
+  ::Kokkos::parallel_scan(str, policy, functor);
+}
+#endif
+
+template <class FunctorType>
+inline void parallel_scan(const std::string& str, const size_t work_count,
+                          const FunctorType& functor) {
+  using execution_space =
+      typename Kokkos::Impl::FunctorPolicyExecutionSpace<FunctorType,
+                                                         void>::execution_space;
+
+  using policy = Kokkos::RangePolicy<execution_space>;
+
+  policy execution_policy(0, work_count);
+  parallel_scan(str, execution_policy, functor);
+}
+
+template <class FunctorType>
+inline void parallel_scan(const size_t work_count, const FunctorType& functor) {
+  ::Kokkos::parallel_scan("", work_count, functor);
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+template <class FunctorType>
+KOKKOS_DEPRECATED_WITH_COMMENT(
+    "Use the overload taking the label as first argument instead!")
+inline void parallel_scan(const size_t work_count, const FunctorType& functor,
+                          const std::string& str) {
+  ::Kokkos::parallel_scan(str, work_count, functor);
+}
+#endif
+
+template <class ExecutionPolicy, class FunctorType, class ReturnType,
+          class Enable =
+              std::enable_if_t<is_execution_policy<ExecutionPolicy>::value>>
+inline void parallel_scan(const std::string& str, const ExecutionPolicy& policy,
+                          const FunctorType& functor,
+                          ReturnType& return_value) {
+  uint64_t kpID                = 0;
+  ExecutionPolicy inner_policy = policy;
+  Kokkos::Tools::Impl::begin_parallel_scan(inner_policy, functor, str, kpID);
+
+  Kokkos::Impl::shared_allocation_tracking_disable();
+  Impl::ParallelScanWithTotal<FunctorType, ExecutionPolicy, ReturnType> closure(
+      functor, inner_policy, return_value);
+  Kokkos::Impl::shared_allocation_tracking_enable();
+
+  closure.execute();
+
+  Kokkos::Tools::Impl::end_parallel_scan(inner_policy, functor, str, kpID);
+
+  policy.space().fence(
+      "Kokkos::parallel_scan: fence due to result being a value, not a view");
+}
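+
+// Editor's usage sketch (illustrative): returning the scan total in a host
+// scalar; the fence above guarantees `total` is valid on return. `functor`
+// and `n` are hypothetical.
+//
+//   int total = 0;
+//   Kokkos::parallel_scan("offsets", Kokkos::RangePolicy<>(0, n), functor,
+//                         total);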
+
+template <class ExecutionPolicy, class FunctorType, class ReturnType>
+inline void parallel_scan(
+    const ExecutionPolicy& policy, const FunctorType& functor,
+    ReturnType& return_value,
+    std::enable_if_t<is_execution_policy<ExecutionPolicy>::value>* = nullptr) {
+  ::Kokkos::parallel_scan("", policy, functor, return_value);
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+template <class ExecutionPolicy, class FunctorType, class ReturnType>
+KOKKOS_DEPRECATED_WITH_COMMENT(
+    "Use the overload taking the label as first argument instead!")
+inline void parallel_scan(
+    const ExecutionPolicy& policy, const FunctorType& functor,
+    ReturnType& return_value, const std::string& str,
+    std::enable_if_t<is_execution_policy<ExecutionPolicy>::value>* = nullptr) {
+  ::Kokkos::parallel_scan(str, policy, functor, return_value);
+}
+#endif
+
+template <class FunctorType, class ReturnType>
+inline void parallel_scan(const std::string& str, const size_t work_count,
+                          const FunctorType& functor,
+                          ReturnType& return_value) {
+  using execution_space =
+      typename Kokkos::Impl::FunctorPolicyExecutionSpace<FunctorType,
+                                                         void>::execution_space;
+
+  using policy = Kokkos::RangePolicy<execution_space>;
+
+  policy execution_policy(0, work_count);
+  parallel_scan(str, execution_policy, functor, return_value);
+}
+
+template <class FunctorType, class ReturnType>
+inline void parallel_scan(const size_t work_count, const FunctorType& functor,
+                          ReturnType& return_value) {
+  ::Kokkos::parallel_scan("", work_count, functor, return_value);
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+template <class FunctorType, class ReturnType>
+KOKKOS_DEPRECATED_WITH_COMMENT(
+    "Use the overload taking the label as first argument instead!")
+inline void parallel_scan(const size_t work_count, const FunctorType& functor,
+                          ReturnType& return_value, const std::string& str) {
+  ::Kokkos::parallel_scan(str, work_count, functor, return_value);
+}
+#endif
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType,
+          bool HasTeamShmemSize =
+              has_member_team_shmem_size<FunctorType>::value,
+          bool HasShmemSize = has_member_shmem_size<FunctorType>::value>
+struct FunctorTeamShmemSize {
+  KOKKOS_INLINE_FUNCTION static size_t value(const FunctorType&, int) {
+    return 0;
+  }
+};
+
+template <class FunctorType>
+struct FunctorTeamShmemSize<FunctorType, true, false> {
+  static inline size_t value(const FunctorType& f, int team_size) {
+    return f.team_shmem_size(team_size);
+  }
+};
+
+template <class FunctorType>
+struct FunctorTeamShmemSize<FunctorType, false, true> {
+  static inline size_t value(const FunctorType& f, int team_size) {
+    return f.shmem_size(team_size);
+  }
+};
+template <class FunctorType>
+struct FunctorTeamShmemSize<FunctorType, true, true> {
+  static inline size_t value(const FunctorType& /*f*/, int /*team_size*/) {
+    Kokkos::abort(
+        "Functor with both team_shmem_size and shmem_size defined is "
+        "not allowed");
+    return 0;
+  }
+};
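+
+// Editor's sketch (illustrative): a team functor advertising its per-team
+// scratch requirement. FunctorTeamShmemSize<MyTeamFunctor>::value(f, ts)
+// forwards to this member; `MyTeamFunctor` is a hypothetical name.
+//
+//   struct MyTeamFunctor {
+//     size_t team_shmem_size(int team_size) const {
+//       return team_size * sizeof(double);  // one double per team member
+//     }
+//     KOKKOS_INLINE_FUNCTION
+//     void operator()(const Kokkos::TeamPolicy<>::member_type&) const {}
+//   };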
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* KOKKOS_PARALLEL_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Parallel_Reduce.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Parallel_Reduce.hpp
new file mode 100644 (file)
index 0000000..9213383
--- /dev/null
@@ -0,0 +1,1805 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_PARALLEL_REDUCE_HPP
+#define KOKKOS_PARALLEL_REDUCE_HPP
+
+#include <Kokkos_NumericTraits.hpp>
+#include <Kokkos_View.hpp>
+#include <impl/Kokkos_FunctorAnalysis.hpp>
+#include <impl/Kokkos_Tools_Generic.hpp>
+#include <type_traits>
+#include <iostream>
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+template <class T>
+using is_reducer_type KOKKOS_DEPRECATED_WITH_COMMENT(
+    "Use Kokkos::is_reducer instead!") = Kokkos::is_reducer<T>;
+#endif
+
+template <class Scalar, class Space>
+struct Sum {
+ public:
+  // Required
+  using reducer    = Sum<Scalar, Space>;
+  using value_type = std::remove_cv_t<Scalar>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  Sum(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  Sum(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const { dest += src; }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val = reduction_identity<value_type>::sum();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
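+
+// Editor's usage sketch (illustrative): reducing into a host scalar with the
+// Sum reducer; `v` and `n` are hypothetical.
+//
+//   double total = 0.;
+//   Kokkos::parallel_reduce(
+//       Kokkos::RangePolicy<>(0, n),
+//       KOKKOS_LAMBDA(const int i, double& lsum) { lsum += v(i); },
+//       Kokkos::Sum<double, Kokkos::HostSpace>(total));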
+
+template <class Scalar, class Space>
+struct Prod {
+ public:
+  // Required
+  using reducer    = Prod<Scalar, Space>;
+  using value_type = std::remove_cv_t<Scalar>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  Prod(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  Prod(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const { dest *= src; }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val = reduction_identity<value_type>::prod();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+template <class Scalar, class Space>
+struct Min {
+ public:
+  // Required
+  using reducer    = Min<Scalar, Space>;
+  using value_type = std::remove_cv_t<Scalar>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  Min(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  Min(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    if (src < dest) dest = src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val = reduction_identity<value_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+template <class Scalar, class Space>
+struct Max {
+ public:
+  // Required
+  using reducer    = Max<Scalar, Space>;
+  using value_type = std::remove_cv_t<Scalar>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  Max(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  Max(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    if (src > dest) dest = src;
+  }
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val = reduction_identity<value_type>::max();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+template <class Scalar, class Space>
+struct LAnd {
+ public:
+  // Required
+  using reducer    = LAnd<Scalar, Space>;
+  using value_type = std::remove_cv_t<Scalar>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  LAnd(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  LAnd(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    dest = dest && src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val = reduction_identity<value_type>::land();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+template <class Scalar, class Space>
+struct LOr {
+ public:
+  // Required
+  using reducer    = LOr<Scalar, Space>;
+  using value_type = std::remove_cv_t<Scalar>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  LOr(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  LOr(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    dest = dest || src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val = reduction_identity<value_type>::lor();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+template <class Scalar, class Space>
+struct BAnd {
+ public:
+  // Required
+  using reducer    = BAnd<Scalar, Space>;
+  using value_type = std::remove_cv_t<Scalar>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  BAnd(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  BAnd(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    dest = dest & src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val = reduction_identity<value_type>::band();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+template <class Scalar, class Space>
+struct BOr {
+ public:
+  // Required
+  using reducer    = BOr<Scalar, Space>;
+  using value_type = std::remove_cv_t<Scalar>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  BOr(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  BOr(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    dest = dest | src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val = reduction_identity<value_type>::bor();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+template <class Scalar, class Index>
+struct ValLocScalar {
+  Scalar val;
+  Index loc;
+
+  KOKKOS_INLINE_FUNCTION
+  void operator=(const ValLocScalar& rhs) {
+    val = rhs.val;
+    loc = rhs.loc;
+  }
+};
+
+template <class Scalar, class Index, class Space>
+struct MinLoc {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer    = MinLoc<Scalar, Index, Space>;
+  using value_type = ValLocScalar<scalar_type, index_type>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  MinLoc(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  MinLoc(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    if (src.val < dest.val) dest = src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.val = reduction_identity<scalar_type>::min();
+    val.loc = reduction_identity<index_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
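+
+// Editor's usage sketch (illustrative): finding the minimum value and its
+// index; `v` and `n` are hypothetical.
+//
+//   using minloc_type = Kokkos::MinLoc<double, int, Kokkos::HostSpace>;
+//   minloc_type::value_type result;
+//   Kokkos::parallel_reduce(
+//       Kokkos::RangePolicy<>(0, n),
+//       KOKKOS_LAMBDA(const int i, minloc_type::value_type& r) {
+//         if (v(i) < r.val) {
+//           r.val = v(i);
+//           r.loc = i;
+//         }
+//       },
+//       minloc_type(result));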
+
+template <class Scalar, class Index, class Space>
+struct MaxLoc {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer    = MaxLoc<Scalar, Index, Space>;
+  using value_type = ValLocScalar<scalar_type, index_type>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  MaxLoc(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  MaxLoc(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    if (src.val > dest.val) dest = src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.val = reduction_identity<scalar_type>::max();
+    val.loc = reduction_identity<index_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+template <class Scalar>
+struct MinMaxScalar {
+  Scalar min_val, max_val;
+
+  KOKKOS_INLINE_FUNCTION
+  void operator=(const MinMaxScalar& rhs) {
+    min_val = rhs.min_val;
+    max_val = rhs.max_val;
+  }
+};
+
+template <class Scalar, class Space>
+struct MinMax {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+
+ public:
+  // Required
+  using reducer    = MinMax<Scalar, Space>;
+  using value_type = MinMaxScalar<scalar_type>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  MinMax(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  MinMax(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    if (src.min_val < dest.min_val) {
+      dest.min_val = src.min_val;
+    }
+    if (src.max_val > dest.max_val) {
+      dest.max_val = src.max_val;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.max_val = reduction_identity<scalar_type>::max();
+    val.min_val = reduction_identity<scalar_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+template <class Scalar, class Index>
+struct MinMaxLocScalar {
+  Scalar min_val, max_val;
+  Index min_loc, max_loc;
+
+  KOKKOS_INLINE_FUNCTION
+  void operator=(const MinMaxLocScalar& rhs) {
+    min_val = rhs.min_val;
+    min_loc = rhs.min_loc;
+    max_val = rhs.max_val;
+    max_loc = rhs.max_loc;
+  }
+};
+
+template <class Scalar, class Index, class Space>
+struct MinMaxLoc {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer    = MinMaxLoc<Scalar, Index, Space>;
+  using value_type = MinMaxLocScalar<scalar_type, index_type>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  MinMaxLoc(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  MinMaxLoc(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    if (src.min_val < dest.min_val) {
+      dest.min_val = src.min_val;
+      dest.min_loc = src.min_loc;
+    }
+    if (src.max_val > dest.max_val) {
+      dest.max_val = src.max_val;
+      dest.max_loc = src.max_loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.max_val = reduction_identity<scalar_type>::max();
+    val.min_val = reduction_identity<scalar_type>::min();
+    val.max_loc = reduction_identity<index_type>::min();
+    val.min_loc = reduction_identity<index_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+// --------------------------------------------------
+// reducers added to support std algorithms
+// --------------------------------------------------
+
+//
+// MaxFirstLoc
+//
+template <class Scalar, class Index, class Space>
+struct MaxFirstLoc {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer    = MaxFirstLoc<Scalar, Index, Space>;
+  using value_type = ::Kokkos::ValLocScalar<scalar_type, index_type>;
+
+  using result_view_type = ::Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  MaxFirstLoc(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  MaxFirstLoc(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    if (dest.val < src.val) {
+      dest = src;
+    } else if (!(src.val < dest.val)) {
+      dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.val = reduction_identity<scalar_type>::max();
+    val.loc = reduction_identity<index_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+//
+// MaxFirstLocCustomComparator
+// recall that comp(a,b) returns true if a < b
+//
+template <class Scalar, class Index, class ComparatorType, class Space>
+struct MaxFirstLocCustomComparator {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer =
+      MaxFirstLocCustomComparator<Scalar, Index, ComparatorType, Space>;
+  using value_type = ::Kokkos::ValLocScalar<scalar_type, index_type>;
+
+  using result_view_type = ::Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+  ComparatorType m_comp;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  MaxFirstLocCustomComparator(value_type& value_, ComparatorType comp_)
+      : value(&value_), references_scalar_v(true), m_comp(comp_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  MaxFirstLocCustomComparator(const result_view_type& value_,
+                              ComparatorType comp_)
+      : value(value_), references_scalar_v(false), m_comp(comp_) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    if (m_comp(dest.val, src.val)) {
+      dest = src;
+    } else if (!m_comp(src.val, dest.val)) {
+      dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.val = reduction_identity<scalar_type>::max();
+    val.loc = reduction_identity<index_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
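+
+// Editor's sketch (illustrative): constructing the reducer with a
+// user-supplied comparator implementing a strict weak ordering; `Less` is a
+// hypothetical name.
+//
+//   struct Less {
+//     KOKKOS_INLINE_FUNCTION
+//     bool operator()(double a, double b) const { return a < b; }
+//   };
+//   Kokkos::ValLocScalar<double, int> result;
+//   Kokkos::MaxFirstLocCustomComparator<double, int, Less, Kokkos::HostSpace>
+//       reducer(result, Less{});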
+
+//
+// MinFirstLoc
+//
+template <class Scalar, class Index, class Space>
+struct MinFirstLoc {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer    = MinFirstLoc<Scalar, Index, Space>;
+  using value_type = ::Kokkos::ValLocScalar<scalar_type, index_type>;
+
+  using result_view_type = ::Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  MinFirstLoc(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  MinFirstLoc(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    if (src.val < dest.val) {
+      dest = src;
+    } else if (!(dest.val < src.val)) {
+      dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.val = reduction_identity<scalar_type>::min();
+    val.loc = reduction_identity<index_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+//
+// MinFirstLocCustomComparator
+// recall that comp(a,b) returns true if a < b
+//
+template <class Scalar, class Index, class ComparatorType, class Space>
+struct MinFirstLocCustomComparator {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer =
+      MinFirstLocCustomComparator<Scalar, Index, ComparatorType, Space>;
+  using value_type = ::Kokkos::ValLocScalar<scalar_type, index_type>;
+
+  using result_view_type = ::Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+  ComparatorType m_comp;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  MinFirstLocCustomComparator(value_type& value_, ComparatorType comp_)
+      : value(&value_), references_scalar_v(true), m_comp(comp_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  MinFirstLocCustomComparator(const result_view_type& value_,
+                              ComparatorType comp_)
+      : value(value_), references_scalar_v(false), m_comp(comp_) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    if (m_comp(src.val, dest.val)) {
+      dest = src;
+    } else if (!m_comp(dest.val, src.val)) {
+      dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.val = reduction_identity<scalar_type>::min();
+    val.loc = reduction_identity<index_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+//
+// MinMaxFirstLastLoc
+//
+template <class Scalar, class Index, class Space>
+struct MinMaxFirstLastLoc {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer    = MinMaxFirstLastLoc<Scalar, Index, Space>;
+  using value_type = ::Kokkos::MinMaxLocScalar<scalar_type, index_type>;
+
+  using result_view_type = ::Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  MinMaxFirstLastLoc(value_type& value_)
+      : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  MinMaxFirstLastLoc(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    if (src.min_val < dest.min_val) {
+      dest.min_val = src.min_val;
+      dest.min_loc = src.min_loc;
+    } else if (!(dest.min_val < src.min_val)) {
+      dest.min_loc = (src.min_loc < dest.min_loc) ? src.min_loc : dest.min_loc;
+    }
+
+    if (dest.max_val < src.max_val) {
+      dest.max_val = src.max_val;
+      dest.max_loc = src.max_loc;
+    } else if (!(src.max_val < dest.max_val)) {
+      dest.max_loc = (src.max_loc > dest.max_loc) ? src.max_loc : dest.max_loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.max_val = ::Kokkos::reduction_identity<scalar_type>::max();
+    val.min_val = ::Kokkos::reduction_identity<scalar_type>::min();
+    val.max_loc = ::Kokkos::reduction_identity<index_type>::max();
+    val.min_loc = ::Kokkos::reduction_identity<index_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+//
+// MinMaxFirstLastLocCustomComparator
+// recall that comp(a,b) returns true if a < b
+//
+template <class Scalar, class Index, class ComparatorType, class Space>
+struct MinMaxFirstLastLocCustomComparator {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer =
+      MinMaxFirstLastLocCustomComparator<Scalar, Index, ComparatorType, Space>;
+  using value_type = ::Kokkos::MinMaxLocScalar<scalar_type, index_type>;
+
+  using result_view_type = ::Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+  ComparatorType m_comp;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  MinMaxFirstLastLocCustomComparator(value_type& value_, ComparatorType comp_)
+      : value(&value_), references_scalar_v(true), m_comp(comp_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  MinMaxFirstLastLocCustomComparator(const result_view_type& value_,
+                                     ComparatorType comp_)
+      : value(value_), references_scalar_v(false), m_comp(comp_) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    if (m_comp(src.min_val, dest.min_val)) {
+      dest.min_val = src.min_val;
+      dest.min_loc = src.min_loc;
+    } else if (!m_comp(dest.min_val, src.min_val)) {
+      dest.min_loc = (src.min_loc < dest.min_loc) ? src.min_loc : dest.min_loc;
+    }
+
+    if (m_comp(dest.max_val, src.max_val)) {
+      dest.max_val = src.max_val;
+      dest.max_loc = src.max_loc;
+    } else if (!m_comp(src.max_val, dest.max_val)) {
+      dest.max_loc = (src.max_loc > dest.max_loc) ? src.max_loc : dest.max_loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.max_val = ::Kokkos::reduction_identity<scalar_type>::max();
+    val.min_val = ::Kokkos::reduction_identity<scalar_type>::min();
+    val.max_loc = ::Kokkos::reduction_identity<index_type>::max();
+    val.min_loc = ::Kokkos::reduction_identity<index_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+//
+// FirstLoc
+//
+template <class Index>
+struct FirstLocScalar {
+  Index min_loc_true;
+
+  KOKKOS_INLINE_FUNCTION
+  void operator=(const FirstLocScalar& rhs) { min_loc_true = rhs.min_loc_true; }
+};
+
+template <class Index, class Space>
+struct FirstLoc {
+ private:
+  using index_type = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer    = FirstLoc<Index, Space>;
+  using value_type = FirstLocScalar<index_type>;
+
+  using result_view_type = ::Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  FirstLoc(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  FirstLoc(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    dest.min_loc_true = (src.min_loc_true < dest.min_loc_true)
+                            ? src.min_loc_true
+                            : dest.min_loc_true;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.min_loc_true = ::Kokkos::reduction_identity<index_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+//
+// LastLoc
+//
+template <class Index>
+struct LastLocScalar {
+  Index max_loc_true;
+
+  KOKKOS_INLINE_FUNCTION
+  void operator=(const LastLocScalar& rhs) { max_loc_true = rhs.max_loc_true; }
+};
+
+template <class Index, class Space>
+struct LastLoc {
+ private:
+  using index_type = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer    = LastLoc<Index, Space>;
+  using value_type = LastLocScalar<index_type>;
+
+  using result_view_type = ::Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  LastLoc(value_type& value_) : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  LastLoc(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    dest.max_loc_true = (src.max_loc_true > dest.max_loc_true)
+                            ? src.max_loc_true
+                            : dest.max_loc_true;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.max_loc_true = ::Kokkos::reduction_identity<index_type>::max();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+template <class Index>
+struct StdIsPartScalar {
+  Index max_loc_true, min_loc_false;
+
+  KOKKOS_INLINE_FUNCTION
+  void operator=(const StdIsPartScalar& rhs) {
+    min_loc_false = rhs.min_loc_false;
+    max_loc_true  = rhs.max_loc_true;
+  }
+};
+
+//
+// StdIsPartitioned
+//
+template <class Index, class Space>
+struct StdIsPartitioned {
+ private:
+  using index_type = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer    = StdIsPartitioned<Index, Space>;
+  using value_type = StdIsPartScalar<index_type>;
+
+  using result_view_type = ::Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  StdIsPartitioned(value_type& value_)
+      : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  StdIsPartitioned(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    dest.max_loc_true = (dest.max_loc_true < src.max_loc_true)
+                            ? src.max_loc_true
+                            : dest.max_loc_true;
+
+    dest.min_loc_false = (dest.min_loc_false < src.min_loc_false)
+                             ? dest.min_loc_false
+                             : src.min_loc_false;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.max_loc_true  = ::Kokkos::reduction_identity<index_type>::max();
+    val.min_loc_false = ::Kokkos::reduction_identity<index_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+template <class Index>
+struct StdPartPointScalar {
+  Index min_loc_false;
+
+  KOKKOS_INLINE_FUNCTION
+  void operator=(const StdPartPointScalar& rhs) {
+    min_loc_false = rhs.min_loc_false;
+  }
+};
+
+//
+// StdPartitionPoint
+//
+template <class Index, class Space>
+struct StdPartitionPoint {
+ private:
+  using index_type = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using reducer    = StdPartitionPoint<Index, Space>;
+  using value_type = StdPartPointScalar<index_type>;
+
+  using result_view_type = ::Kokkos::View<value_type, Space>;
+
+ private:
+  result_view_type value;
+  bool references_scalar_v;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  StdPartitionPoint(value_type& value_)
+      : value(&value_), references_scalar_v(true) {}
+
+  KOKKOS_INLINE_FUNCTION
+  StdPartitionPoint(const result_view_type& value_)
+      : value(value_), references_scalar_v(false) {}
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  void join(value_type& dest, const value_type& src) const {
+    dest.min_loc_false = (dest.min_loc_false < src.min_loc_false)
+                             ? dest.min_loc_false
+                             : src.min_loc_false;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void init(value_type& val) const {
+    val.min_loc_false = ::Kokkos::reduction_identity<index_type>::min();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  value_type& reference() const { return *value.data(); }
+
+  KOKKOS_INLINE_FUNCTION
+  result_view_type view() const { return value; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool references_scalar() const { return references_scalar_v; }
+};
+
+}  // namespace Kokkos
+namespace Kokkos {
+namespace Impl {
+
+template <class T, class ReturnType, class ValueTraits>
+struct ParallelReduceReturnValue;
+
+template <class ReturnType, class FunctorType>
+struct ParallelReduceReturnValue<
+    std::enable_if_t<Kokkos::is_view<ReturnType>::value>, ReturnType,
+    FunctorType> {
+  using return_type  = ReturnType;
+  using reducer_type = InvalidType;
+
+  using value_type_scalar = typename return_type::value_type;
+  using value_type_array  = typename return_type::value_type* const;
+
+  using value_type = std::conditional_t<return_type::rank == 0,
+                                        value_type_scalar, value_type_array>;
+
+  static return_type& return_value(ReturnType& return_val, const FunctorType&) {
+    return return_val;
+  }
+};
+
+template <class ReturnType, class FunctorType>
+struct ParallelReduceReturnValue<
+    std::enable_if_t<!Kokkos::is_view<ReturnType>::value &&
+                     (!std::is_array<ReturnType>::value &&
+                      !std::is_pointer<ReturnType>::value) &&
+                     !Kokkos::is_reducer<ReturnType>::value>,
+    ReturnType, FunctorType> {
+  using return_type =
+      Kokkos::View<ReturnType, Kokkos::HostSpace, Kokkos::MemoryUnmanaged>;
+
+  using reducer_type = InvalidType;
+
+  using value_type = typename return_type::value_type;
+
+  static return_type return_value(ReturnType& return_val, const FunctorType&) {
+    return return_type(&return_val);
+  }
+};
+
+template <class ReturnType, class FunctorType>
+struct ParallelReduceReturnValue<
+    std::enable_if_t<(std::is_array<ReturnType>::value ||
+                      std::is_pointer<ReturnType>::value)>,
+    ReturnType, FunctorType> {
+  using return_type = Kokkos::View<std::remove_const_t<ReturnType>,
+                                   Kokkos::HostSpace, Kokkos::MemoryUnmanaged>;
+
+  using reducer_type = InvalidType;
+
+  using value_type = typename return_type::value_type[];
+
+  static return_type return_value(ReturnType& return_val,
+                                  const FunctorType& functor) {
+    if (std::is_array<ReturnType>::value)
+      return return_type(return_val);
+    else
+      return return_type(return_val, functor.value_count);
+  }
+};
+
+template <class ReturnType, class FunctorType>
+struct ParallelReduceReturnValue<
+    std::enable_if_t<Kokkos::is_reducer<ReturnType>::value>, ReturnType,
+    FunctorType> {
+  using return_type  = ReturnType;
+  using reducer_type = ReturnType;
+  using value_type   = typename return_type::value_type;
+
+  static return_type return_value(ReturnType& return_val, const FunctorType&) {
+    return return_val;
+  }
+};
+
+template <class T, class ReturnType, class FunctorType>
+struct ParallelReducePolicyType;
+
+template <class PolicyType, class FunctorType>
+struct ParallelReducePolicyType<
+    std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value>,
+    PolicyType, FunctorType> {
+  using policy_type = PolicyType;
+  static PolicyType policy(const PolicyType& policy_) { return policy_; }
+};
+
+template <class PolicyType, class FunctorType>
+struct ParallelReducePolicyType<
+    std::enable_if_t<std::is_integral<PolicyType>::value>, PolicyType,
+    FunctorType> {
+  using execution_space =
+      typename Impl::FunctorPolicyExecutionSpace<FunctorType,
+                                                 void>::execution_space;
+
+  using policy_type = Kokkos::RangePolicy<execution_space>;
+
+  static policy_type policy(const PolicyType& policy_) {
+    return policy_type(0, policy_);
+  }
+};
+
+template <class FunctorType, class ExecPolicy, class ValueType,
+          class ExecutionSpace>
+struct ParallelReduceFunctorType {
+  using functor_type = FunctorType;
+  static const functor_type& functor(const functor_type& functor) {
+    return functor;
+  }
+};
+
+template <class PolicyType, class FunctorType, class ReturnType>
+struct ParallelReduceAdaptor {
+  using return_value_adapter =
+      Impl::ParallelReduceReturnValue<void, ReturnType, FunctorType>;
+
+  static inline void execute_impl(const std::string& label,
+                                  const PolicyType& policy,
+                                  const FunctorType& functor,
+                                  ReturnType& return_value) {
+    uint64_t kpID = 0;
+
+    PolicyType inner_policy = policy;
+    Kokkos::Tools::Impl::begin_parallel_reduce<
+        typename return_value_adapter::reducer_type>(inner_policy, functor,
+                                                     label, kpID);
+
+    Kokkos::Impl::shared_allocation_tracking_disable();
+    Impl::ParallelReduce<FunctorType, PolicyType,
+                         typename return_value_adapter::reducer_type>
+        closure(functor, inner_policy,
+                return_value_adapter::return_value(return_value, functor));
+    Kokkos::Impl::shared_allocation_tracking_enable();
+    closure.execute();
+
+    Kokkos::Tools::Impl::end_parallel_reduce<
+        typename return_value_adapter::reducer_type>(inner_policy, functor,
+                                                     label, kpID);
+  }
+
+  static constexpr bool is_array_reduction =
+      Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
+                            FunctorType>::StaticValueSize == 0;
+
+  template <typename Dummy = ReturnType>
+  static inline std::enable_if_t<!(is_array_reduction &&
+                                   std::is_pointer<Dummy>::value)>
+  execute(const std::string& label, const PolicyType& policy,
+          const FunctorType& functor, ReturnType& return_value) {
+    execute_impl(label, policy, functor, return_value);
+  }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  template <typename Dummy = ReturnType>
+  KOKKOS_DEPRECATED_WITH_COMMENT(
+      "Array reductions with a raw pointer return type are deprecated. Use a "
+      "Kokkos::View as return argument!")
+  static inline std::
+      enable_if_t<is_array_reduction && std::is_pointer<Dummy>::value> execute(
+          const std::string& label, const PolicyType& policy,
+          const FunctorType& functor, ReturnType& return_value) {
+    execute_impl(label, policy, functor, return_value);
+  }
+#endif
+};
+}  // namespace Impl
+
+//----------------------------------------------------------------------------
+
+/*! \fn void parallel_reduce(label,policy,functor,return_argument)
+    \brief Perform a parallel reduction.
+    \param label An optional label giving the call name. Must be able to
+           construct a std::string from the argument.
+    \param policy A Kokkos Execution Policy, such as an integer, a
+           RangePolicy or a TeamPolicy.
+    \param functor A functor with a reduction operator, and optional init,
+           join and final functions.
+    \param return_argument A return argument which can be a scalar, a View,
+           or a ReducerStruct. This argument can be left out if the functor
+           has a final function.
+*/
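+
+// Usage sketch (editor's illustration; `f` and `n` are hypothetical):
+//   double sum = 0.0;
+//   Kokkos::parallel_reduce("MySum", Kokkos::RangePolicy<>(0, n), f, sum);
+//   // or, passing a reducer instead of a plain scalar:
+//   Kokkos::parallel_reduce(n, f, Kokkos::Sum<double>(sum));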
+
+// Parallel Reduce Blocking behavior
+
+namespace Impl {
+template <typename T>
+struct ReducerHasTestReferenceFunction {
+  template <typename E>
+  static std::true_type test_func(decltype(&E::references_scalar));
+  template <typename E>
+  static std::false_type test_func(...);
+
+  enum {
+    value = std::is_same<std::true_type, decltype(test_func<T>(nullptr))>::value
+  };
+};
+
+template <class ExecutionSpace, class Arg>
+constexpr std::enable_if_t<
+    // constraints only necessary because SFINAE lacks subsumption
+    !ReducerHasTestReferenceFunction<Arg>::value &&
+        !Kokkos::is_view<Arg>::value,
+    // return type:
+    bool>
+parallel_reduce_needs_fence(ExecutionSpace const&, Arg const&) {
+  return true;
+}
+
+template <class ExecutionSpace, class Reducer>
+constexpr std::enable_if_t<
+    // equivalent to:
+    // (requires (Reducer const& r) {
+    //   { reducer.references_scalar() } -> std::convertible_to<bool>;
+    // })
+    ReducerHasTestReferenceFunction<Reducer>::value,
+    // return type:
+    bool>
+parallel_reduce_needs_fence(ExecutionSpace const&, Reducer const& reducer) {
+  return reducer.references_scalar();
+}
+
+template <class ExecutionSpace, class ViewLike>
+constexpr std::enable_if_t<
+    // requires Kokkos::ViewLike<ViewLike>
+    Kokkos::is_view<ViewLike>::value,
+    // return type:
+    bool>
+parallel_reduce_needs_fence(ExecutionSpace const&, ViewLike const&) {
+  return false;
+}
+
+template <class ExecutionSpace, class... Args>
+struct ParallelReduceFence {
+  template <class... ArgsDeduced>
+  static void fence(const ExecutionSpace& ex, const std::string& name,
+                    ArgsDeduced&&... args) {
+    if (Impl::parallel_reduce_needs_fence(ex, (ArgsDeduced &&) args...)) {
+      ex.fence(name);
+    }
+  }
+};
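+
+// Editor's sketch of the resulting blocking semantics (SumFunctor and v
+// are hypothetical):
+//   double sum;
+//   Kokkos::parallel_reduce(N, SumFunctor{}, sum);  // fences: scalar result
+//   Kokkos::View<double, Kokkos::HostSpace> v("v");
+//   Kokkos::parallel_reduce(N, SumFunctor{}, v);    // no fence: View result
+//   Kokkos::Sum<double> r(sum);
+//   Kokkos::parallel_reduce(N, SumFunctor{}, r);    // fences, since r
+//                                                   // references a scalar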
+
+}  // namespace Impl
+
+/** \brief  Parallel reduction
+ *
+ * parallel_reduce performs parallel reductions with arbitrary functions,
+ * i.e. it is not solely data based. The call expects up to four arguments:
+ * an optional label, an execution policy, the functor, and an optional
+ * return argument (see the \fn documentation above).
+ *
+ * Example of a parallel_reduce functor for a POD (plain old data) value type:
+ * \code
+ *  class FunctorType { // For POD value type
+ *  public:
+ *    using execution_space = ...;
+ *    using value_type = <podType>;
+ *    void operator()( <intType> iwork , <podType> & update ) const ;
+ *    void init( <podType> & update ) const ;
+ *    void join(       <podType> & update ,
+ *               const <podType> & input ) const ;
+ *
+ *    void final( <podType> & update ) const ;
+ *  };
+ * \endcode
+ *
+ * Example of a parallel_reduce functor for an array of POD (plain old data)
+ * values:
+ * \code
+ *  class FunctorType { // For array of POD value
+ *  public:
+ *    using execution_space = ...;
+ *    using value_type = <podType>[];
+ *    void operator()( <intType> , <podType> update[] ) const ;
+ *    void init( <podType> update[] ) const ;
+ *    void join(       <podType> update[] ,
+ *               const <podType> input[] ) const ;
+ *
+ *    void final( <podType> update[] ) const ;
+ *  };
+ * \endcode
+ */
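+
+// A minimal concrete sketch (editor's illustration; SumFunctor is a
+// hypothetical name, not part of this header):
+//   struct SumFunctor {
+//     using value_type = double;
+//     KOKKOS_INLINE_FUNCTION
+//     void operator()(const int i, double& update) const { update += i; }
+//   };
+//   double result = 0.0;
+//   Kokkos::parallel_reduce("Sum", Kokkos::RangePolicy<>(0, 100),
+//                           SumFunctor{}, result);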
+
+// ReturnValue is scalar or array: take by reference
+
+template <class PolicyType, class FunctorType, class ReturnType>
+inline std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value &&
+                        !(Kokkos::is_view<ReturnType>::value ||
+                          Kokkos::is_reducer<ReturnType>::value ||
+                          std::is_pointer<ReturnType>::value)>
+parallel_reduce(const std::string& label, const PolicyType& policy,
+                const FunctorType& functor, ReturnType& return_value) {
+  static_assert(
+      !std::is_const<ReturnType>::value,
+      "A const reduction result type is only allowed for a View, pointer or "
+      "reducer return type!");
+
+  Impl::ParallelReduceAdaptor<PolicyType, FunctorType, ReturnType>::execute(
+      label, policy, functor, return_value);
+  Impl::ParallelReduceFence<typename PolicyType::execution_space, ReturnType>::
+      fence(
+          policy.space(),
+          "Kokkos::parallel_reduce: fence due to result being value, not view",
+          return_value);
+}
+
+template <class PolicyType, class FunctorType, class ReturnType>
+inline std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value &&
+                        !(Kokkos::is_view<ReturnType>::value ||
+                          Kokkos::is_reducer<ReturnType>::value ||
+                          std::is_pointer<ReturnType>::value)>
+parallel_reduce(const PolicyType& policy, const FunctorType& functor,
+                ReturnType& return_value) {
+  static_assert(
+      !std::is_const<ReturnType>::value,
+      "A const reduction result type is only allowed for a View, pointer or "
+      "reducer return type!");
+
+  Impl::ParallelReduceAdaptor<PolicyType, FunctorType, ReturnType>::execute(
+      "", policy, functor, return_value);
+  Impl::ParallelReduceFence<typename PolicyType::execution_space, ReturnType>::
+      fence(
+          policy.space(),
+          "Kokkos::parallel_reduce: fence due to result being value, not view",
+          return_value);
+}
+
+template <class FunctorType, class ReturnType>
+inline std::enable_if_t<!(Kokkos::is_view<ReturnType>::value ||
+                          Kokkos::is_reducer<ReturnType>::value ||
+                          std::is_pointer<ReturnType>::value)>
+parallel_reduce(const size_t& policy, const FunctorType& functor,
+                ReturnType& return_value) {
+  static_assert(
+      !std::is_const<ReturnType>::value,
+      "A const reduction result type is only allowed for a View, pointer or "
+      "reducer return type!");
+
+  using policy_type =
+      typename Impl::ParallelReducePolicyType<void, size_t,
+                                              FunctorType>::policy_type;
+
+  Impl::ParallelReduceAdaptor<policy_type, FunctorType, ReturnType>::execute(
+      "", policy_type(0, policy), functor, return_value);
+  Impl::ParallelReduceFence<typename policy_type::execution_space, ReturnType>::
+      fence(
+          typename policy_type::execution_space(),
+          "Kokkos::parallel_reduce: fence due to result being value, not view",
+          return_value);
+}
+
+template <class FunctorType, class ReturnType>
+inline std::enable_if_t<!(Kokkos::is_view<ReturnType>::value ||
+                          Kokkos::is_reducer<ReturnType>::value ||
+                          std::is_pointer<ReturnType>::value)>
+parallel_reduce(const std::string& label, const size_t& policy,
+                const FunctorType& functor, ReturnType& return_value) {
+  static_assert(
+      !std::is_const<ReturnType>::value,
+      "A const reduction result type is only allowed for a View, pointer or "
+      "reducer return type!");
+
+  using policy_type =
+      typename Impl::ParallelReducePolicyType<void, size_t,
+                                              FunctorType>::policy_type;
+  Impl::ParallelReduceAdaptor<policy_type, FunctorType, ReturnType>::execute(
+      label, policy_type(0, policy), functor, return_value);
+  Impl::ParallelReduceFence<typename policy_type::execution_space, ReturnType>::
+      fence(
+          typename policy_type::execution_space(),
+          "Kokkos::parallel_reduce: fence due to result being value, not view",
+          return_value);
+}
+
+// ReturnValue as View or Reducer: take by copy to allow for inline construction
+
+template <class PolicyType, class FunctorType, class ReturnType>
+inline std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value &&
+                        (Kokkos::is_view<ReturnType>::value ||
+                         Kokkos::is_reducer<ReturnType>::value ||
+                         std::is_pointer<ReturnType>::value)>
+parallel_reduce(const std::string& label, const PolicyType& policy,
+                const FunctorType& functor, const ReturnType& return_value) {
+  ReturnType return_value_impl = return_value;
+  Impl::ParallelReduceAdaptor<PolicyType, FunctorType, ReturnType>::execute(
+      label, policy, functor, return_value_impl);
+  Impl::ParallelReduceFence<typename PolicyType::execution_space, ReturnType>::
+      fence(
+          policy.space(),
+          "Kokkos::parallel_reduce: fence due to result being value, not view",
+          return_value);
+}
+
+template <class PolicyType, class FunctorType, class ReturnType>
+inline std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value &&
+                        (Kokkos::is_view<ReturnType>::value ||
+                         Kokkos::is_reducer<ReturnType>::value ||
+                         std::is_pointer<ReturnType>::value)>
+parallel_reduce(const PolicyType& policy, const FunctorType& functor,
+                const ReturnType& return_value) {
+  ReturnType return_value_impl = return_value;
+  Impl::ParallelReduceAdaptor<PolicyType, FunctorType, ReturnType>::execute(
+      "", policy, functor, return_value_impl);
+  Impl::ParallelReduceFence<typename PolicyType::execution_space, ReturnType>::
+      fence(
+          policy.space(),
+          "Kokkos::parallel_reduce: fence due to result being value, not view",
+          return_value);
+}
+
+template <class FunctorType, class ReturnType>
+inline std::enable_if_t<Kokkos::is_view<ReturnType>::value ||
+                        Kokkos::is_reducer<ReturnType>::value ||
+                        std::is_pointer<ReturnType>::value>
+parallel_reduce(const size_t& policy, const FunctorType& functor,
+                const ReturnType& return_value) {
+  using policy_type =
+      typename Impl::ParallelReducePolicyType<void, size_t,
+                                              FunctorType>::policy_type;
+  ReturnType return_value_impl = return_value;
+  Impl::ParallelReduceAdaptor<policy_type, FunctorType, ReturnType>::execute(
+      "", policy_type(0, policy), functor, return_value_impl);
+  Impl::ParallelReduceFence<typename policy_type::execution_space, ReturnType>::
+      fence(
+          typename policy_type::execution_space(),
+          "Kokkos::parallel_reduce: fence due to result being value, not view",
+          return_value);
+}
+
+template <class FunctorType, class ReturnType>
+inline std::enable_if_t<Kokkos::is_view<ReturnType>::value ||
+                        Kokkos::is_reducer<ReturnType>::value ||
+                        std::is_pointer<ReturnType>::value>
+parallel_reduce(const std::string& label, const size_t& policy,
+                const FunctorType& functor, const ReturnType& return_value) {
+  using policy_type =
+      typename Impl::ParallelReducePolicyType<void, size_t,
+                                              FunctorType>::policy_type;
+  ReturnType return_value_impl = return_value;
+  Impl::ParallelReduceAdaptor<policy_type, FunctorType, ReturnType>::execute(
+      label, policy_type(0, policy), functor, return_value_impl);
+  Impl::ParallelReduceFence<typename policy_type::execution_space, ReturnType>::
+      fence(
+          typename policy_type::execution_space(),
+          "Kokkos::parallel_reduce: fence due to result being value, not view",
+          return_value);
+}
+
+// No Return Argument
+
+template <class PolicyType, class FunctorType>
+inline void parallel_reduce(
+    const std::string& label, const PolicyType& policy,
+    const FunctorType& functor,
+    std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value>* =
+        nullptr) {
+  using FunctorAnalysis =
+      Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
+                            FunctorType>;
+  using value_type = std::conditional_t<(FunctorAnalysis::StaticValueSize != 0),
+                                        typename FunctorAnalysis::value_type,
+                                        typename FunctorAnalysis::pointer_type>;
+
+  static_assert(
+      FunctorAnalysis::has_final_member_function,
+      "Calling parallel_reduce without either return value or final function.");
+
+  using result_view_type =
+      Kokkos::View<value_type, Kokkos::HostSpace, Kokkos::MemoryUnmanaged>;
+  result_view_type result_view;
+
+  Impl::ParallelReduceAdaptor<PolicyType, FunctorType,
+                              result_view_type>::execute(label, policy, functor,
+                                                         result_view);
+}
+
+template <class PolicyType, class FunctorType>
+inline void parallel_reduce(
+    const PolicyType& policy, const FunctorType& functor,
+    std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value>* =
+        nullptr) {
+  using FunctorAnalysis =
+      Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
+                            FunctorType>;
+  using value_type = std::conditional_t<(FunctorAnalysis::StaticValueSize != 0),
+                                        typename FunctorAnalysis::value_type,
+                                        typename FunctorAnalysis::pointer_type>;
+
+  static_assert(
+      FunctorAnalysis::has_final_member_function,
+      "Calling parallel_reduce without either return value or final function.");
+
+  using result_view_type =
+      Kokkos::View<value_type, Kokkos::HostSpace, Kokkos::MemoryUnmanaged>;
+  result_view_type result_view;
+
+  Impl::ParallelReduceAdaptor<PolicyType, FunctorType,
+                              result_view_type>::execute("", policy, functor,
+                                                         result_view);
+}
+
+template <class FunctorType>
+inline void parallel_reduce(const size_t& policy, const FunctorType& functor) {
+  using policy_type =
+      typename Impl::ParallelReducePolicyType<void, size_t,
+                                              FunctorType>::policy_type;
+  using FunctorAnalysis =
+      Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, policy_type,
+                            FunctorType>;
+  using value_type = std::conditional_t<(FunctorAnalysis::StaticValueSize != 0),
+                                        typename FunctorAnalysis::value_type,
+                                        typename FunctorAnalysis::pointer_type>;
+
+  static_assert(
+      FunctorAnalysis::has_final_member_function,
+      "Calling parallel_reduce without either return value or final function.");
+
+  using result_view_type =
+      Kokkos::View<value_type, Kokkos::HostSpace, Kokkos::MemoryUnmanaged>;
+  result_view_type result_view;
+
+  Impl::ParallelReduceAdaptor<policy_type, FunctorType,
+                              result_view_type>::execute("",
+                                                         policy_type(0, policy),
+                                                         functor, result_view);
+}
+
+template <class FunctorType>
+inline void parallel_reduce(const std::string& label, const size_t& policy,
+                            const FunctorType& functor) {
+  using policy_type =
+      typename Impl::ParallelReducePolicyType<void, size_t,
+                                              FunctorType>::policy_type;
+  using FunctorAnalysis =
+      Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, policy_type,
+                            FunctorType>;
+  using value_type = std::conditional_t<(FunctorAnalysis::StaticValueSize != 0),
+                                        typename FunctorAnalysis::value_type,
+                                        typename FunctorAnalysis::pointer_type>;
+
+  static_assert(
+      FunctorAnalysis::has_final_member_function,
+      "Calling parallel_reduce without either return value or final function.");
+
+  using result_view_type =
+      Kokkos::View<value_type, Kokkos::HostSpace, Kokkos::MemoryUnmanaged>;
+  result_view_type result_view;
+
+  Impl::ParallelReduceAdaptor<policy_type, FunctorType,
+                              result_view_type>::execute(label,
+                                                         policy_type(0, policy),
+                                                         functor, result_view);
+}
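+
+// Editor's sketch: with a final() member function the result can be
+// consumed inside the functor itself, so no return argument is needed
+// (Printer is a hypothetical name):
+//   struct Printer {
+//     using value_type = double;
+//     KOKKOS_INLINE_FUNCTION
+//     void operator()(const int i, double& update) const { update += i; }
+//     KOKKOS_INLINE_FUNCTION
+//     void final(double& update) const { /* use the final value */ }
+//   };
+//   Kokkos::parallel_reduce("PrintSum", n, Printer{});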
+
+}  // namespace Kokkos
+
+#endif  // KOKKOS_PARALLEL_REDUCE_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_PointerOwnership.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_PointerOwnership.hpp
new file mode 100644 (file)
index 0000000..41b18a8
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// Experimental unified task-data parallel manycore LDRD
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_IMPL_POINTEROWNERSHIP_HPP
+#define KOKKOS_IMPL_POINTEROWNERSHIP_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Core_fwd.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+/// Trivial wrapper for raw pointers that express ownership.
+template <class T>
+using OwningRawPtr = T*;
+
+/// Trivial wrapper for raw pointers that do not express ownership.
+template <class T>
+using ObservingRawPtr = T*;
+
+}  // end namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_IMPL_POINTEROWNERSHIP_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Profiling_ProfileSection.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Profiling_ProfileSection.hpp
new file mode 100644 (file)
index 0000000..266605c
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOSP_PROFILE_SECTION_HPP
+#define KOKKOSP_PROFILE_SECTION_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PROFILING_PROFILESECTION
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+
+#include <string>
+
+namespace Kokkos {
+namespace Profiling {
+
+class ProfilingSection {
+ public:
+  ProfilingSection(ProfilingSection const&) = delete;
+  ProfilingSection& operator=(ProfilingSection const&) = delete;
+
+  ProfilingSection(const std::string& sectionName)
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+      : secName(sectionName)
+#endif
+  {
+    if (Kokkos::Profiling::profileLibraryLoaded()) {
+      Kokkos::Profiling::createProfileSection(sectionName, &secID);
+    }
+  }
+
+  void start() {
+    if (Kokkos::Profiling::profileLibraryLoaded()) {
+      Kokkos::Profiling::startSection(secID);
+    }
+  }
+
+  void stop() {
+    if (Kokkos::Profiling::profileLibraryLoaded()) {
+      Kokkos::Profiling::stopSection(secID);
+    }
+  }
+
+  ~ProfilingSection() {
+    if (Kokkos::Profiling::profileLibraryLoaded()) {
+      Kokkos::Profiling::destroyProfileSection(secID);
+    }
+  }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  KOKKOS_DEPRECATED std::string getName() { return secName; }
+
+  KOKKOS_DEPRECATED uint32_t getSectionID() { return secID; }
+#endif
+
+ protected:
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  const std::string secName;
+#endif
+  uint32_t secID;
+};
+
+}  // namespace Profiling
+}  // namespace Kokkos
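+
+// Usage sketch (editor's illustration): a section brackets a region of
+// interest for a loaded profiling tool.
+//   Kokkos::Profiling::ProfilingSection section("assembly");
+//   section.start();
+//   /* ... region of interest ... */
+//   section.stop();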
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PROFILING_PROFILESECTION
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_PROFILING_PROFILESECTION
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Rank.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Rank.hpp
new file mode 100644 (file)
index 0000000..025cf51
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_KOKKOS_RANK_HPP
+#define KOKKOS_KOKKOS_RANK_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Layout.hpp>  // Iterate
+
+namespace Kokkos {
+
+// Iteration Pattern
+template <unsigned N, Iterate OuterDir = Iterate::Default,
+          Iterate InnerDir = Iterate::Default>
+struct Rank {
+  static_assert(N != 0u, "Kokkos Error: rank 0 undefined");
+  static_assert(N != 1u,
+                "Kokkos Error: rank 1 is not a multi-dimensional range");
+  static_assert(N < 7u, "Kokkos Error: Unsupported rank...");
+
+  using iteration_pattern = Rank<N, OuterDir, InnerDir>;
+
+  static constexpr int rank                = N;
+  static constexpr Iterate outer_direction = OuterDir;
+  static constexpr Iterate inner_direction = InnerDir;
+};
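+
+// Editor's sketch: Rank supplies the iteration pattern of an MDRangePolicy,
+// e.g. a 2D policy iterating over [0,N) x [0,M):
+//   Kokkos::MDRangePolicy<Kokkos::Rank<2>> policy({0, 0}, {N, M});
+//   Kokkos::parallel_for("Fill", policy, functor);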
+
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_RANK_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_SYCL.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_SYCL.hpp
new file mode 100644 (file)
index 0000000..a7f1696
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_SYCL_HPP
+#define KOKKOS_SYCL_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#ifdef KOKKOS_ENABLE_SYCL
+#include <CL/sycl.hpp>
+#include <Kokkos_SYCL_Space.hpp>
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <impl/Kokkos_HostSharedPtr.hpp>
+#include <impl/Kokkos_InitializationSettings.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+class SYCLInternal;
+}
+
+/// \class SYCL
+/// \brief Kokkos execution space for SYCL devices.
+class SYCL {
+ public:
+  //------------------------------------
+  //! \name Type declarations that all Kokkos devices must provide.
+  //@{
+
+  //! Tag this class as a kokkos execution space
+  using execution_space = SYCL;
+  using memory_space    = SYCLDeviceUSMSpace;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+
+  using array_layout = LayoutLeft;
+  using size_type    = memory_space::size_type;
+
+  using scratch_memory_space = ScratchMemorySpace<SYCL>;
+
+  SYCL();
+  explicit SYCL(const sycl::queue&);
+
+  uint32_t impl_instance_id() const noexcept {
+    return m_space_instance->impl_get_instance_id();
+  }
+
+  sycl::queue& sycl_queue() const noexcept {
+    return *m_space_instance->m_queue;
+  }
+
+  //@}
+  //------------------------------------
+  //! \name Functions that all Kokkos devices must implement.
+  //@{
+
+  KOKKOS_INLINE_FUNCTION static int in_parallel() {
+#if defined(__SYCL_DEVICE_ONLY__)
+    return true;
+#else
+    return false;
+#endif
+  }
+
+  /** \brief  Set the device in a "sleep" state. */
+  static bool sleep();
+
+  /** \brief Wake the device from the 'sleep' state. A noop for SYCL. */
+  static bool wake();
+
+  /** \brief Wait until all dispatched functors on all SYCL instances
+   *  complete. */
+  static void impl_static_fence(const std::string& name);
+
+  void fence(
+      const std::string& name =
+          "Kokkos::Experimental::SYCL::fence: Unnamed Instance Fence") const;
+
+  /// \brief Print configuration information to the given output stream.
+  void print_configuration(std::ostream& os, bool verbose = false) const;
+
+  /// \brief Free any resources being consumed by the device.
+  static void impl_finalize();
+
+  static void impl_initialize(InitializationSettings const&);
+
+  int sycl_device() const;
+
+  static bool impl_is_initialized();
+
+  static int concurrency();
+  static const char* name();
+
+  inline Impl::SYCLInternal* impl_internal_space_instance() const {
+    return m_space_instance.get();
+  }
+
+ private:
+  static std::ostream& impl_sycl_info(std::ostream& os,
+                                      const sycl::device& device);
+
+  Kokkos::Impl::HostSharedPtr<Impl::SYCLInternal> m_space_instance;
+};
+
+}  // namespace Experimental
+
+namespace Tools {
+namespace Experimental {
+template <>
+struct DeviceTypeTraits<Kokkos::Experimental::SYCL> {
+  /// \brief An ID to differentiate (for example) Serial from OpenMP in Tooling
+  static constexpr DeviceType id = DeviceType::SYCL;
+  static int device_id(const Kokkos::Experimental::SYCL& exec) {
+    return exec.sycl_device();
+  }
+};
+}  // namespace Experimental
+}  // namespace Tools
+
+namespace Experimental {
+template <class... Args>
+std::vector<SYCL> partition_space(const SYCL& sycl_space, Args...) {
+#ifdef __cpp_fold_expressions
+  static_assert(
+      (... && std::is_arithmetic_v<Args>),
+      "Kokkos Error: partitioning arguments must be integers or floats");
+#endif
+
+  sycl::context context = sycl_space.sycl_queue().get_context();
+  sycl::device device =
+      sycl_space.impl_internal_space_instance()->m_queue->get_device();
+  std::vector<SYCL> instances;
+  instances.reserve(sizeof...(Args));
+  for (unsigned int i = 0; i < sizeof...(Args); ++i)
+    instances.emplace_back(sycl::queue(context, device));
+  return instances;
+}
+
+template <class T>
+std::vector<SYCL> partition_space(const SYCL& sycl_space,
+                                  std::vector<T>& weights) {
+  static_assert(
+      std::is_arithmetic<T>::value,
+      "Kokkos Error: partitioning arguments must be integers or floats");
+
+  sycl::context context = sycl_space.sycl_queue().get_context();
+  sycl::device device =
+      sycl_space.impl_internal_space_instance()->m_queue->get_device();
+  std::vector<SYCL> instances;
+  instances.reserve(weights.size());
+  for (unsigned int i = 0; i < weights.size(); ++i)
+    instances.emplace_back(sycl::queue(context, device));
+  return instances;
+}
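+
+// Usage sketch (editor's illustration): each partition receives its own
+// queue on the same context and device, so work submitted to different
+// instances may overlap.
+//   auto instances = Kokkos::Experimental::partition_space(space, 1, 1);
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<Kokkos::Experimental::SYCL>(instances[0], 0, n),
+//       functor);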
+}  // namespace Experimental
+
+}  // namespace Kokkos
+
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_SYCL_Space.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_SYCL_Space.hpp
new file mode 100644 (file)
index 0000000..e147d04
--- /dev/null
@@ -0,0 +1,492 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_SYCLSPACE_HPP
+#define KOKKOS_SYCLSPACE_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+
+#ifdef KOKKOS_ENABLE_SYCL
+#include <Kokkos_Concepts.hpp>
+#include <Kokkos_HostSpace.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <SYCL/Kokkos_SYCL_Instance.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+#include <impl/Kokkos_Tools.hpp>
+
+namespace Kokkos {
+
+namespace Impl {
+template <typename T>
+struct is_sycl_type_space : public std::false_type {};
+}  // namespace Impl
+
+namespace Experimental {
+
+class SYCLDeviceUSMSpace {
+ public:
+  using execution_space = SYCL;
+  using memory_space    = SYCLDeviceUSMSpace;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+  using size_type       = Impl::SYCLInternal::size_type;
+
+  SYCLDeviceUSMSpace();
+  explicit SYCLDeviceUSMSpace(sycl::queue queue);
+
+  void* allocate(const SYCL& exec_space,
+                 const std::size_t arg_alloc_size) const;
+  void* allocate(const SYCL& exec_space, const char* arg_label,
+                 const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+  void* allocate(const std::size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  void deallocate(void* const arg_alloc_ptr,
+                  const std::size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
+
+ private:
+  template <class, class, class, class>
+  friend class LogicalMemorySpace;
+
+ public:
+  static constexpr const char* name() { return "SYCLDeviceUSM"; };
+
+ private:
+  sycl::queue m_queue;
+};
+
+class SYCLSharedUSMSpace {
+ public:
+  using execution_space = SYCL;
+  using memory_space    = SYCLSharedUSMSpace;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+  using size_type       = Impl::SYCLInternal::size_type;
+
+  SYCLSharedUSMSpace();
+  explicit SYCLSharedUSMSpace(sycl::queue queue);
+
+  void* allocate(const SYCL& exec_space,
+                 const std::size_t arg_alloc_size) const;
+  void* allocate(const SYCL& exec_space, const char* arg_label,
+                 const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+  void* allocate(const std::size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  void deallocate(void* const arg_alloc_ptr,
+                  const std::size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
+
+ private:
+  template <class, class, class, class>
+  friend class LogicalMemorySpace;
+
+ public:
+  static constexpr const char* name() { return "SYCLSharedUSM"; };
+
+ private:
+  sycl::queue m_queue;
+};
+
+class SYCLHostUSMSpace {
+ public:
+  using execution_space = HostSpace::execution_space;
+  using memory_space    = SYCLHostUSMSpace;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+  using size_type       = Impl::SYCLInternal::size_type;
+
+  SYCLHostUSMSpace();
+  explicit SYCLHostUSMSpace(sycl::queue queue);
+
+  void* allocate(const SYCL& exec_space,
+                 const std::size_t arg_alloc_size) const;
+  void* allocate(const SYCL& exec_space, const char* arg_label,
+                 const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+  void* allocate(const std::size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  void deallocate(void* const arg_alloc_ptr,
+                  const std::size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
+
+ private:
+  template <class, class, class, class>
+  friend class LogicalMemorySpace;
+
+ public:
+  static constexpr const char* name() { return "SYCLHostUSM"; };
+
+ private:
+  sycl::queue m_queue;
+};
+
+}  // namespace Experimental
+
+namespace Impl {
+
+template <>
+struct is_sycl_type_space<Kokkos::Experimental::SYCLDeviceUSMSpace>
+    : public std::true_type {};
+
+template <>
+struct is_sycl_type_space<Kokkos::Experimental::SYCLSharedUSMSpace>
+    : public std::true_type {};
+
+template <>
+struct is_sycl_type_space<Kokkos::Experimental::SYCLHostUSMSpace>
+    : public std::true_type {};
+
+static_assert(Kokkos::Impl::MemorySpaceAccess<
+                  Kokkos::Experimental::SYCLDeviceUSMSpace,
+                  Kokkos::Experimental::SYCLDeviceUSMSpace>::assignable,
+              "");
+
+static_assert(Kokkos::Impl::MemorySpaceAccess<
+                  Kokkos::Experimental::SYCLSharedUSMSpace,
+                  Kokkos::Experimental::SYCLSharedUSMSpace>::assignable,
+              "");
+
+static_assert(Kokkos::Impl::MemorySpaceAccess<
+                  Kokkos::Experimental::SYCLHostUSMSpace,
+                  Kokkos::Experimental::SYCLHostUSMSpace>::assignable,
+              "");
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace,
+                         Kokkos::Experimental::SYCLDeviceUSMSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace,
+                         Kokkos::Experimental::SYCLSharedUSMSpace> {
+  // HostSpace::execution_space != SYCLSharedUSMSpace::execution_space
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::HostSpace,
+                         Kokkos::Experimental::SYCLHostUSMSpace> {
+  // HostSpace::execution_space ==
+  // Experimental::SYCLHostUSMSpace::execution_space
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                         Kokkos::HostSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                         Kokkos::Experimental::SYCLSharedUSMSpace> {
+  // SYCLDeviceUSMSpace::execution_space == SYCLSharedUSMSpace::execution_space
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                         Kokkos::Experimental::SYCLHostUSMSpace> {
+  // Experimental::SYCLDeviceUSMSpace::execution_space !=
+  // Experimental::SYCLHostUSMSpace::execution_space
+  enum : bool { assignable = false };
+  enum : bool {
+    accessible = true
+  };  // Experimental::SYCLDeviceUSMSpace::execution_space
+  enum : bool { deepcopy = true };
+};
+
+//----------------------------------------
+// SYCLSharedUSMSpace::execution_space == SYCL
+// SYCLSharedUSMSpace accessible to both SYCL and Host
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::SYCLSharedUSMSpace,
+                         Kokkos::HostSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };  // SYCL cannot access HostSpace
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::SYCLSharedUSMSpace,
+                         Kokkos::Experimental::SYCLDeviceUSMSpace> {
+  // SYCLSharedUSMSpace::execution_space == SYCLDeviceUSMSpace::execution_space
+  // Can access SYCLSharedUSMSpace from Host but cannot access
+  // SYCLDeviceUSMSpace from Host
+  enum : bool { assignable = false };
+
+  // SYCLSharedUSMSpace::execution_space can access SYCLDeviceUSMSpace
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::SYCLSharedUSMSpace,
+                         Kokkos::Experimental::SYCLHostUSMSpace> {
+  // Experimental::SYCLSharedUSMSpace::execution_space !=
+  // Experimental::SYCLHostUSMSpace::execution_space
+  enum : bool { assignable = false };
+  enum : bool {
+    accessible = true
+  };  // Experimental::SYCLSharedUSMSpace::execution_space
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::SYCLHostUSMSpace,
+                         Kokkos::HostSpace> {
+  enum : bool { assignable = false };  // Cannot access from SYCL
+  enum : bool {
+    accessible = true
+  };  // Experimental::SYCLHostUSMSpace::execution_space
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::SYCLHostUSMSpace,
+                         Kokkos::Experimental::SYCLDeviceUSMSpace> {
+  enum : bool { assignable = false };  // Cannot access from Host
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Experimental::SYCLHostUSMSpace,
+                         Kokkos::Experimental::SYCLSharedUSMSpace> {
+  enum : bool { assignable = false };  // different execution_space
+  enum : bool { accessible = true };   // same accessibility
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct MemorySpaceAccess<
+    Kokkos::Experimental::SYCLDeviceUSMSpace,
+    Kokkos::ScratchMemorySpace<Kokkos::Experimental::SYCL>> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = false };
+};
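+
+// Editor's illustration: these traits feed compile-time accessibility
+// checks, e.g.
+//   static_assert(Kokkos::Impl::MemorySpaceAccess<
+//                     Kokkos::HostSpace,
+//                     Kokkos::Experimental::SYCLSharedUSMSpace>::accessible,
+//                 "host code may dereference shared-USM allocations");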
+
+}  // namespace Impl
+
+namespace Impl {
+
+template <>
+class SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace, void>
+    : public HostInaccessibleSharedAllocationRecordCommon<
+          Kokkos::Experimental::SYCLDeviceUSMSpace> {
+ private:
+  friend class SharedAllocationRecordCommon<
+      Kokkos::Experimental::SYCLDeviceUSMSpace>;
+  friend class HostInaccessibleSharedAllocationRecordCommon<
+      Kokkos::Experimental::SYCLDeviceUSMSpace>;
+  using base_t = HostInaccessibleSharedAllocationRecordCommon<
+      Kokkos::Experimental::SYCLDeviceUSMSpace>;
+  using RecordBase = SharedAllocationRecord<void, void>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord(SharedAllocationRecord&&)      = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(SharedAllocationRecord&&) = delete;
+
+#ifdef KOKKOS_ENABLE_DEBUG
+  static RecordBase s_root_record;
+#endif
+
+  const Kokkos::Experimental::SYCLDeviceUSMSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord();
+
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /*exec_space*/,
+      const Kokkos::Experimental::SYCLDeviceUSMSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate)
+      : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
+                               arg_dealloc) {}
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::SYCL& exec_space,
+      const Kokkos::Experimental::SYCLDeviceUSMSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::SYCLDeviceUSMSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+};
+
+template <>
+class SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace, void>
+    : public SharedAllocationRecordCommon<
+          Kokkos::Experimental::SYCLSharedUSMSpace> {
+ private:
+  friend class SharedAllocationRecordCommon<
+      Kokkos::Experimental::SYCLSharedUSMSpace>;
+  using base_t =
+      SharedAllocationRecordCommon<Kokkos::Experimental::SYCLSharedUSMSpace>;
+  using RecordBase = SharedAllocationRecord<void, void>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord(SharedAllocationRecord&&)      = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(SharedAllocationRecord&&) = delete;
+
+  static RecordBase s_root_record;
+
+  const Kokkos::Experimental::SYCLSharedUSMSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord();
+
+  SharedAllocationRecord() = default;
+
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /*exec_space*/,
+      const Kokkos::Experimental::SYCLSharedUSMSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate)
+      : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
+                               arg_dealloc) {}
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::SYCL& exec_space,
+      const Kokkos::Experimental::SYCLSharedUSMSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::SYCLSharedUSMSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+};
+
+template <>
+class SharedAllocationRecord<Kokkos::Experimental::SYCLHostUSMSpace, void>
+    : public SharedAllocationRecordCommon<
+          Kokkos::Experimental::SYCLHostUSMSpace> {
+ private:
+  friend class SharedAllocationRecordCommon<
+      Kokkos::Experimental::SYCLHostUSMSpace>;
+  using base_t =
+      SharedAllocationRecordCommon<Kokkos::Experimental::SYCLHostUSMSpace>;
+  using RecordBase = SharedAllocationRecord<void, void>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord(SharedAllocationRecord&&)      = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(SharedAllocationRecord&&) = delete;
+
+  static RecordBase s_root_record;
+
+  const Kokkos::Experimental::SYCLHostUSMSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord();
+
+  SharedAllocationRecord() = default;
+
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /*exec_space*/,
+      const Kokkos::Experimental::SYCLHostUSMSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate)
+      : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
+                               arg_dealloc) {}
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::SYCL& exec_space,
+      const Kokkos::Experimental::SYCLHostUSMSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::SYCLHostUSMSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &base_t::deallocate);
+};
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_ScratchSpace.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_ScratchSpace.hpp
new file mode 100644 (file)
index 0000000..3e37eb6
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_SCRATCHSPACE_HPP
+#define KOKKOS_SCRATCHSPACE_HPP
+
+#include <cstdio>
+#include <cstddef>
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Concepts.hpp>
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+/** \brief  Scratch memory space associated with an execution space.
+ *
+ */
+template <class ExecSpace>
+class ScratchMemorySpace {
+  static_assert(
+      is_execution_space<ExecSpace>::value,
+      "Instantiating ScratchMemorySpace on non-execution-space type.");
+
+ public:
+  // Alignment of memory chunks returned by 'get'
+  // must be a power of two
+  enum { ALIGN = 8 };
+
+ private:
+  mutable char* m_iter_L0 = nullptr;
+  mutable char* m_iter_L1 = nullptr;
+  char* m_end_L0          = nullptr;
+  char* m_end_L1          = nullptr;
+
+  mutable int m_multiplier    = 0;
+  mutable int m_offset        = 0;
+  mutable int m_default_level = 0;
+
+  enum { MASK = ALIGN - 1 };  // Alignment used by View::shmem_size
+
+ public:
+  //! Tag this class as a memory space
+  using memory_space    = ScratchMemorySpace<ExecSpace>;
+  using execution_space = ExecSpace;
+  //! This execution space's preferred device_type
+  using device_type = Kokkos::Device<execution_space, memory_space>;
+
+  using array_layout = typename ExecSpace::array_layout;
+  using size_type    = typename ExecSpace::size_type;
+
+  static constexpr const char* name() { return "ScratchMemorySpace"; }
+
+  template <typename IntType>
+  KOKKOS_INLINE_FUNCTION static IntType align(const IntType& size) {
+    return (size + MASK) & ~MASK;
+  }
+
+  template <typename IntType>
+  KOKKOS_INLINE_FUNCTION void* get_shmem(const IntType& size,
+                                         int level = -1) const {
+    return get_shmem_common</*aligned*/ false>(size, 1, level);
+  }
+
+  template <typename IntType>
+  KOKKOS_INLINE_FUNCTION void* get_shmem_aligned(const IntType& size,
+                                                 const ptrdiff_t alignment,
+                                                 int level = -1) const {
+    return get_shmem_common</*aligned*/ true>(size, alignment, level);
+  }
+
+ private:
+  template <bool aligned, typename IntType>
+  KOKKOS_INLINE_FUNCTION void* get_shmem_common(const IntType& size,
+                                                const ptrdiff_t alignment,
+                                                int level = -1) const {
+    if (level == -1) level = m_default_level;
+    auto& m_iter              = (level == 0) ? m_iter_L0 : m_iter_L1;
+    auto& m_end               = (level == 0) ? m_end_L0 : m_end_L1;
+    char* previous            = m_iter;
+    const ptrdiff_t misalign = size_t(m_iter) % alignment;
+    if (misalign) m_iter += alignment - misalign;
+
+    void* tmp = m_iter + m_offset * (aligned ? size : align(size));
+    if (m_end < (m_iter += (aligned ? size : align(size)) * m_multiplier)) {
+      m_iter = previous;  // put it back like it was
+#ifdef KOKKOS_ENABLE_DEBUG
+      // mfh 23 Jun 2015: printf call consumes 25 registers
+      // in a CUDA build, so only print in debug mode.  The
+      // function still returns nullptr if not enough memory.
+      KOKKOS_IMPL_DO_NOT_USE_PRINTF(
+          "ScratchMemorySpace<...>::get_shmem: Failed to allocate "
+          "%ld byte(s); remaining capacity is %ld byte(s)\n",
+          long(size), long(m_end - m_iter));
+#endif  // KOKKOS_ENABLE_DEBUG
+      tmp = nullptr;
+    }
+    return tmp;
+  }
+
+ public:
+  KOKKOS_DEFAULTED_FUNCTION
+  ScratchMemorySpace() = default;
+
+  template <typename IntType>
+  KOKKOS_INLINE_FUNCTION ScratchMemorySpace(void* ptr_L0,
+                                            const IntType& size_L0,
+                                            void* ptr_L1           = nullptr,
+                                            const IntType& size_L1 = 0)
+      : m_iter_L0(static_cast<char*>(ptr_L0)),
+        m_iter_L1(static_cast<char*>(ptr_L1)),
+        m_end_L0(static_cast<char*>(ptr_L0) + size_L0),
+        m_end_L1(static_cast<char*>(ptr_L1) + size_L1),
+        m_multiplier(1),
+        m_offset(0),
+        m_default_level(0) {}
+
+  KOKKOS_INLINE_FUNCTION
+  const ScratchMemorySpace& set_team_thread_mode(const int& level,
+                                                 const int& multiplier,
+                                                 const int& offset) const {
+    m_default_level = level;
+    m_multiplier    = multiplier;
+    m_offset        = offset;
+    return *this;
+  }
+};
+
+}  // namespace Kokkos
+
+#endif /* #ifndef KOKKOS_SCRATCHSPACE_HPP */
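
ScratchMemorySpace objects are not created directly by users: the execution back-end constructs one per team from the pointers and sizes requested through TeamPolicy, and set_team_thread_mode() switches between per-team and per-thread carving. A hedged usage sketch of the standard access path via team_shmem()/get_shmem(); the sizes here are illustrative:

#include <Kokkos_Core.hpp>

// Sketch: requesting level-0 team scratch inside a team kernel.
void team_scratch_demo(const int nteams) {
  using policy_type = Kokkos::TeamPolicy<>;
  using member_type = policy_type::member_type;
  const size_t bytes = 128 * sizeof(double);

  Kokkos::parallel_for(
      "scratch_demo",
      policy_type(nteams, Kokkos::AUTO)
          .set_scratch_size(/*level=*/0, Kokkos::PerTeam(bytes)),
      KOKKOS_LAMBDA(const member_type& team) {
        // get_shmem() advances the level's iterator by align(size) times the
        // team/thread multiplier and returns nullptr once the budget set via
        // set_scratch_size() is exhausted.
        double* buf =
            static_cast<double*>(team.team_shmem().get_shmem(bytes));
        if (buf != nullptr) buf[team.team_rank() % 128] = 1.0;
      });
}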
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Serial.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Serial.hpp
new file mode 100644 (file)
index 0000000..ffdd1e9
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/// \file Kokkos_Serial.hpp
+/// \brief Declaration and definition of Kokkos::Serial device.
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_SERIAL_HPP
+#define KOKKOS_SERIAL_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_SERIAL)
+
+#include <cstddef>
+#include <iosfwd>
+#include <mutex>
+#include <thread>
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_TaskScheduler.hpp>
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_HostSpace.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <Kokkos_MemoryTraits.hpp>
+#include <impl/Kokkos_HostThreadTeam.hpp>
+#include <impl/Kokkos_FunctorAnalysis.hpp>
+#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_HostSharedPtr.hpp>
+#include <impl/Kokkos_InitializationSettings.hpp>
+
+namespace Kokkos {
+
+namespace Impl {
+class SerialInternal {
+ public:
+  SerialInternal() = default;
+
+  bool is_initialized();
+
+  void initialize();
+
+  void finalize();
+
+  static SerialInternal& singleton();
+
+  std::mutex m_thread_team_data_mutex;
+
+  // Resize thread team data scratch memory
+  void resize_thread_team_data(size_t pool_reduce_bytes,
+                               size_t team_reduce_bytes,
+                               size_t team_shared_bytes,
+                               size_t thread_local_bytes);
+
+  HostThreadTeamData m_thread_team_data;
+  bool m_is_initialized = false;
+};
+}  // namespace Impl
+
+/// \class Serial
+/// \brief Kokkos device for non-parallel execution
+///
+/// A "device" represents a parallel execution model.  It tells Kokkos
+/// how to parallelize the execution of kernels in a parallel_for or
+/// parallel_reduce.  For example, the Threads device uses
+/// C++11 threads on a CPU, the OpenMP device uses the OpenMP language
+/// extensions, and the Cuda device uses NVIDIA's CUDA programming
+/// model.  The Serial device executes "parallel" kernels
+/// sequentially.  This is useful if you really do not want to use
+/// threads, or if you want to explore different combinations of MPI
+/// and shared-memory parallel programming models.
+class Serial {
+ public:
+  //! \name Type declarations that all Kokkos devices must provide.
+  //@{
+
+  //! Tag this class as an execution space:
+  using execution_space = Serial;
+  //! This device's preferred memory space.
+  using memory_space = Kokkos::HostSpace;
+  //! The size_type alias best suited for this device.
+  using size_type = memory_space::size_type;
+  //! This execution space's preferred device_type
+  using device_type = Kokkos::Device<execution_space, memory_space>;
+
+  //! This device's preferred array layout.
+  using array_layout = LayoutRight;
+
+  /// \brief  Scratch memory space
+  using scratch_memory_space = ScratchMemorySpace<Kokkos::Serial>;
+
+  //@}
+
+  Serial();
+
+  /// \brief True if and only if this method is being called in a
+  ///   thread-parallel function.
+  ///
+  /// For the Serial device, this method <i>always</i> returns false,
+  /// because parallel_for or parallel_reduce with the Serial device
+  /// always execute sequentially.
+  inline static int in_parallel() { return false; }
+
+  /// \brief Wait until all dispatched functors complete.
+  ///
+  /// The parallel_for or parallel_reduce dispatch of a functor may
+  /// return asynchronously, before the functor completes.  This
+  /// method does not return until all dispatched functors on this
+  /// device have completed.
+  static void impl_static_fence(const std::string& name) {
+    Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Serial>(
+        name,
+        Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+            GlobalDeviceSynchronization,
+        []() {});  // TODO: correct device ID
+    Kokkos::memory_fence();
+  }
+
+  void fence(const std::string& name =
+                 "Kokkos::Serial::fence: Unnamed Instance Fence") const {
+    Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Serial>(
+        name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{1},
+        []() {});  // TODO: correct device ID
+    Kokkos::memory_fence();
+  }
+
+  /** \brief  Return the maximum amount of concurrency.  */
+  static int concurrency() { return 1; }
+
+  //! Print configuration information to the given output stream.
+  void print_configuration(std::ostream& os, bool verbose = false) const;
+
+  static void impl_initialize(InitializationSettings const&);
+
+  static bool impl_is_initialized();
+
+  //! Free any resources being consumed by the device.
+  static void impl_finalize();
+
+  //--------------------------------------------------------------------------
+
+  inline static int impl_thread_pool_size(int = 0) { return 1; }
+  KOKKOS_INLINE_FUNCTION static int impl_thread_pool_rank() { return 0; }
+
+  //--------------------------------------------------------------------------
+
+  KOKKOS_INLINE_FUNCTION static unsigned impl_hardware_thread_id() {
+    return impl_thread_pool_rank();
+  }
+  inline static unsigned impl_max_hardware_threads() {
+    return impl_thread_pool_size(0);
+  }
+
+  uint32_t impl_instance_id() const noexcept { return 1; }
+
+  static const char* name();
+
+  Impl::SerialInternal* impl_internal_space_instance() const {
+#ifdef KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
+    return m_space_instance;
+#else
+    return m_space_instance.get();
+#endif
+  }
+
+ private:
+#ifdef KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
+  Impl::SerialInternal* m_space_instance;
+#else
+  Kokkos::Impl::HostSharedPtr<Impl::SerialInternal> m_space_instance;
+#endif
+  //--------------------------------------------------------------------------
+};
+
+namespace Tools {
+namespace Experimental {
+template <>
+struct DeviceTypeTraits<Serial> {
+  static constexpr DeviceType id = DeviceType::Serial;
+  static int device_id(const Serial&) { return 0; }
+};
+}  // namespace Experimental
+}  // namespace Tools
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+// We only need to provide a specialization for Serial if there is a host
+// parallel execution space since the specialization for
+// DefaultHostExecutionSpace is defined elsewhere.
+struct DummyExecutionSpace;
+template <class DT, class... DP>
+struct ZeroMemset<
+    std::conditional_t<!std::is_same<Serial, DefaultHostExecutionSpace>::value,
+                       Serial, DummyExecutionSpace>,
+    DT, DP...> : public ZeroMemset<DefaultHostExecutionSpace, DT, DP...> {
+  using Base = ZeroMemset<DefaultHostExecutionSpace, DT, DP...>;
+  using Base::Base;
+
+  ZeroMemset(const Serial&, const View<DT, DP...>& dst,
+             typename View<DT, DP...>::const_value_type& value)
+      : Base(dst, value) {}
+};
+
+template <>
+struct MemorySpaceAccess<Kokkos::Serial::memory_space,
+                         Kokkos::Serial::scratch_memory_space> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = false };
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#include <Serial/Kokkos_Serial_Parallel_Range.hpp>
+#include <Serial/Kokkos_Serial_Parallel_MDRange.hpp>
+#include <Serial/Kokkos_Serial_Parallel_Team.hpp>
+#include <Serial/Kokkos_Serial_Task.hpp>
+#include <Serial/Kokkos_Serial_UniqueToken.hpp>
+
+#endif  // defined( KOKKOS_ENABLE_SERIAL )
+#endif  /* #ifndef KOKKOS_SERIAL_HPP */
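
Serial satisfies the full execution-space interface, so forcing a kernel onto it only requires naming it in the policy; dispatch is synchronous and concurrency() is 1. A minimal sketch, assuming KOKKOS_ENABLE_SERIAL:

#include <Kokkos_Core.hpp>

// Sketch: a "parallel" reduction that runs sequentially on the Serial backend.
long sum_first_n(const int n) {
  long sum = 0;
  Kokkos::parallel_reduce(
      "serial_sum", Kokkos::RangePolicy<Kokkos::Serial>(0, n),
      KOKKOS_LAMBDA(const int i, long& partial) { partial += i; }, sum);
  // Reductions into a host scalar block until complete; Serial::fence()
  // would only add a memory fence plus a profiling event.
  return sum;
}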
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_TaskPolicy.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_TaskPolicy.hpp
new file mode 100644 (file)
index 0000000..9751fab
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// For backward compatibility:
+#include <Kokkos_Macros.hpp>
+
+KOKKOS_IMPL_WARNING(
+    "This file is deprecated. Use <Kokkos_TaskScheduler.hpp> instead.")
+
+#include <Kokkos_TaskScheduler.hpp>
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_TaskScheduler.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_TaskScheduler.hpp
new file mode 100644 (file)
index 0000000..c3453b7
--- /dev/null
@@ -0,0 +1,708 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_TASKSCHEDULER_HPP
+#define KOKKOS_TASKSCHEDULER_HPP
+
+//----------------------------------------------------------------------------
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_TaskScheduler_fwd.hpp>
+//----------------------------------------------------------------------------
+
+#include <Kokkos_MemoryPool.hpp>
+
+#include <Kokkos_Future.hpp>
+#include <impl/Kokkos_TaskQueue.hpp>
+#include <impl/Kokkos_SingleTaskQueue.hpp>
+#include <impl/Kokkos_TaskQueueMultiple.hpp>
+#include <impl/Kokkos_TaskPolicyData.hpp>
+#include <impl/Kokkos_TaskTeamMember.hpp>
+#include <impl/Kokkos_SimpleTaskScheduler.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <class, class>
+class TaskExec;
+
+}  // end namespace Impl
+
+template <class ExecSpace, class QueueType>
+class BasicTaskScheduler : public Impl::TaskSchedulerBase {
+ public:
+  using scheduler_type      = BasicTaskScheduler;
+  using execution_space     = ExecSpace;
+  using queue_type          = QueueType;
+  using memory_space        = typename queue_type::memory_space;
+  using memory_pool         = typename queue_type::memory_pool;
+  using specialization      = Impl::TaskQueueSpecialization<BasicTaskScheduler>;
+  using member_type         = typename specialization::member_type;
+  using team_scheduler_type = BasicTaskScheduler;
+  template <class Functor>
+  using runnable_task_type =
+      Impl::Task<scheduler_type, typename Functor::value_type, Functor>;
+  template <class ValueType>
+  using future_type = Kokkos::BasicFuture<ValueType, BasicTaskScheduler>;
+  template <class FunctorType>
+  using future_type_for_functor = future_type<typename FunctorType::value_type>;
+
+ private:
+  using track_type = Kokkos::Impl::SharedAllocationTracker;
+  using task_base  = Impl::TaskBase;
+
+  track_type m_track;
+  queue_type* m_queue;
+
+  //----------------------------------------
+
+  template <typename, typename>
+  friend class Impl::TaskQueue;
+  template <typename>
+  friend struct Impl::TaskQueueSpecialization;
+  template <typename, typename>
+  friend class Impl::TaskQueueSpecializationConstrained;
+  template <typename, typename>
+  friend class Impl::TaskTeamMemberAdapter;
+  template <typename, typename>
+  friend class Impl::TaskExec;
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  BasicTaskScheduler(track_type arg_track, queue_type* arg_queue)
+      : m_track(std::move(arg_track)), m_queue(std::move(arg_queue)) {}
+
+  KOKKOS_INLINE_FUNCTION
+  team_scheduler_type get_team_scheduler(int team_rank) const {
+    return {m_track, &m_queue->get_team_queue(team_rank)};
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  static constexpr task_base* _get_task_ptr(std::nullptr_t) { return nullptr; }
+
+  template <class ValueType>
+  KOKKOS_INLINE_FUNCTION static constexpr task_base* _get_task_ptr(
+      future_type<ValueType>&& f) {
+    return f.m_task;
+  }
+
+  template <int TaskEnum, typename DepTaskType, typename FunctorType>
+  KOKKOS_FUNCTION
+      Kokkos::BasicFuture<typename FunctorType::value_type, scheduler_type>
+      _spawn_impl(DepTaskType* arg_predecessor_task, TaskPriority arg_priority,
+                  typename task_base::function_type arg_function,
+                  typename task_base::destroy_type /*arg_destroy*/,
+                  FunctorType&& arg_functor) {
+    using functor_future_type =
+        future_type_for_functor<std::decay_t<FunctorType>>;
+    using task_type =
+        Impl::Task<BasicTaskScheduler, typename functor_future_type::value_type,
+                   FunctorType>;
+
+    //----------------------------------------
+    // Give single-thread back-ends an opportunity to clear
+    // queue of ready tasks before allocating a new task
+
+    // TODO @tasking @optimization DSH re-enable this, maybe?
+    // specialization::iff_single_thread_recursive_execute(scheduler);
+
+    //----------------------------------------
+
+    functor_future_type f;
+
+    // Allocate task from memory pool
+
+    const size_t alloc_size =
+        m_queue->template spawn_allocation_size<FunctorType>();
+
+    void* task_storage = m_queue->allocate(alloc_size);
+
+    if (task_storage) {
+      // Placement new construction
+      // Reference count starts at two:
+      //   +1 for the matching decrement when task is complete
+      //   +1 for the future
+      f.m_task =
+          new (task_storage) task_type(std::forward<FunctorType>(arg_functor));
+
+      f.m_task->m_apply = arg_function;
+      // f.m_task->m_destroy    = arg_destroy;
+      f.m_task->m_queue      = m_queue;
+      f.m_task->m_next       = arg_predecessor_task;
+      f.m_task->m_ref_count  = 2;
+      f.m_task->m_alloc_size = alloc_size;
+      f.m_task->m_task_type  = TaskEnum;
+      f.m_task->m_priority   = (int16_t)arg_priority;
+
+      Kokkos::memory_fence();
+
+      // The dependence (if any) is processed immediately
+      // within the schedule function, as such the dependence's
+      // reference count does not need to be incremented for
+      // the assignment.
+
+      m_queue->schedule_runnable(f.m_task);
+      // This task may be updated or executed at any moment,
+      // even during the call to 'schedule'.
+    }
+
+    return f;
+  }
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  BasicTaskScheduler() : m_track(), m_queue(nullptr) {}
+
+  KOKKOS_INLINE_FUNCTION
+  BasicTaskScheduler(BasicTaskScheduler&& rhs) noexcept
+      : m_track(rhs.m_track),  // probably should be a move, but this is
+                               // deprecated code anyway
+        m_queue(std::move(rhs.m_queue)) {}
+
+  KOKKOS_INLINE_FUNCTION
+  BasicTaskScheduler(BasicTaskScheduler const& rhs)
+      : m_track(rhs.m_track), m_queue(rhs.m_queue) {}
+
+  KOKKOS_INLINE_FUNCTION
+  BasicTaskScheduler& operator=(BasicTaskScheduler&& rhs) noexcept {
+    m_track = rhs.m_track;  // probably should be a move, but this is deprecated
+                            // code anyway
+    m_queue = std::move(rhs.m_queue);
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  BasicTaskScheduler& operator=(BasicTaskScheduler const& rhs) {
+    m_track = rhs.m_track;
+    m_queue = rhs.m_queue;
+    return *this;
+  }
+
+  explicit BasicTaskScheduler(memory_pool const& arg_memory_pool) noexcept
+      : m_track(), m_queue(nullptr) {
+    using record_type =
+        Kokkos::Impl::SharedAllocationRecord<memory_space,
+                                             typename queue_type::Destroy>;
+
+    record_type* record = record_type::allocate(
+        memory_space(), "Kokkos::TaskQueue", sizeof(queue_type));
+
+    m_queue = new (record->data()) queue_type(arg_memory_pool);
+
+    record->m_destroy.m_queue = m_queue;
+
+    m_track.assign_allocated_record_to_uninitialized(record);
+  }
+
+  BasicTaskScheduler(memory_space const& arg_memory_space,
+                     size_t const mempool_capacity,
+                     unsigned const mempool_min_block_size  // = 1u << 6
+                     ,
+                     unsigned const mempool_max_block_size  // = 1u << 10
+                     ,
+                     unsigned const mempool_superblock_size  // = 1u << 12
+                     )
+      : BasicTaskScheduler(memory_pool(
+            arg_memory_space, mempool_capacity, mempool_min_block_size,
+            mempool_max_block_size, mempool_superblock_size)) {}
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  queue_type& queue() const noexcept {
+    KOKKOS_EXPECTS(m_queue != nullptr);
+    return *m_queue;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  memory_pool* memory() const noexcept {
+    return m_queue ? &(m_queue->m_memory) : nullptr;
+  }
+
+  //----------------------------------------
+  /**\brief  Allocation size for a spawned task */
+  template <typename FunctorType>
+  KOKKOS_FUNCTION size_t spawn_allocation_size() const {
+    return m_queue->template spawn_allocation_size<FunctorType>();
+  }
+
+  /**\brief  Allocation size for a when_all aggregate */
+  KOKKOS_FUNCTION
+  size_t when_all_allocation_size(int narg) const {
+    return m_queue->when_all_allocation_size(narg);
+  }
+
+  //----------------------------------------
+
+  template <int TaskEnum, typename DepFutureType, typename FunctorType>
+  KOKKOS_FUNCTION static Kokkos::BasicFuture<typename FunctorType::value_type,
+                                             scheduler_type>
+  spawn(Impl::TaskPolicyWithScheduler<TaskEnum, scheduler_type, DepFutureType>&&
+            arg_policy,
+        typename task_base::function_type arg_function,
+        typename task_base::destroy_type arg_destroy,
+        FunctorType&& arg_functor) {
+    return std::move(arg_policy.scheduler())
+        .template _spawn_impl<TaskEnum>(
+            _get_task_ptr(std::move(arg_policy.predecessor())),
+            arg_policy.priority(), arg_function, arg_destroy,
+            std::forward<FunctorType>(arg_functor));
+  }
+
+  template <int TaskEnum, typename DepFutureType, typename FunctorType>
+  KOKKOS_FUNCTION future_type_for_functor<std::decay_t<FunctorType>> spawn(
+      Impl::TaskPolicyWithPredecessor<TaskEnum, DepFutureType>&& arg_policy,
+      FunctorType&& arg_functor) {
+    using task_type = runnable_task_type<FunctorType>;
+    typename task_type::function_type const ptr = task_type::apply;
+    typename task_type::destroy_type const dtor = task_type::destroy;
+
+    return _spawn_impl<TaskEnum>(
+        _get_task_ptr(std::move(arg_policy).predecessor()),
+        arg_policy.priority(), ptr, dtor,
+        std::forward<FunctorType>(arg_functor));
+  }
+
+  template <typename FunctorType, typename ValueType, typename Scheduler>
+  KOKKOS_FUNCTION static void respawn(
+      FunctorType* arg_self,
+      BasicFuture<ValueType, Scheduler> const& arg_dependence,
+      TaskPriority const& arg_priority) {
+    // Precondition: task is in Executing state
+
+    using value_type = typename FunctorType::value_type;
+    using task_type  = Impl::Task<BasicTaskScheduler, value_type, FunctorType>;
+
+    task_type* const task = static_cast<task_type*>(arg_self);
+
+    task->m_priority = static_cast<int>(arg_priority);
+
+    task->add_dependence(arg_dependence.m_task);
+
+    // Postcondition: task is in Executing-Respawn state
+  }
+
+  template <typename FunctorType>
+  KOKKOS_FUNCTION static void respawn(FunctorType* arg_self,
+                                      BasicTaskScheduler const&,
+                                      TaskPriority const& arg_priority) {
+    // Precondition: task is in Executing state
+
+    using value_type = typename FunctorType::value_type;
+    using task_type  = Impl::Task<BasicTaskScheduler, value_type, FunctorType>;
+
+    task_type* const task = static_cast<task_type*>(arg_self);
+
+    task->m_priority = static_cast<int>(arg_priority);
+
+    task->add_dependence(nullptr);
+
+    // Postcondition: task is in Executing-Respawn state
+  }
+
+  //----------------------------------------
+  /**\brief  Return a future that is complete
+   *         when all input futures are complete.
+   */
+  template <typename ValueType>
+  KOKKOS_FUNCTION BasicFuture<void, scheduler_type> when_all(
+      BasicFuture<ValueType, BasicTaskScheduler> const arg[], int narg) {
+    future_type<void> f;
+
+    if (narg) {
+      queue_type* q = m_queue;
+
+      // BasicTaskScheduler const* scheduler_ptr = nullptr;
+
+      for (int i = 0; i < narg; ++i) {
+        task_base* const t = arg[i].m_task;
+        if (nullptr != t) {
+          // Increment reference count to track subsequent assignment.
+          // This likely has to be SeqCst
+          Kokkos::Impl::desul_atomic_inc(&(t->m_ref_count),
+                                         Kokkos::Impl::MemoryOrderSeqCst(),
+                                         Kokkos::Impl::MemoryScopeDevice());
+          if (q != static_cast<queue_type const*>(t->m_queue)) {
+            Kokkos::abort(
+                "Kokkos when_all Futures must be in the same scheduler");
+          }
+        }
+      }
+
+      if (q != nullptr) {  // this should probably handle the queue == 0 case,
+                           // but this is deprecated code anyway
+
+        size_t const alloc_size = q->when_all_allocation_size(narg);
+
+        f.m_task = reinterpret_cast<task_base*>(q->allocate(alloc_size));
+        // f.m_scheduler = *scheduler_ptr;
+
+        if (f.m_task) {
+          // Reference count starts at two:
+          // +1 to match decrement when task completes
+          // +1 for the future
+
+          new (f.m_task) task_base();
+
+          f.m_task->m_queue      = q;
+          f.m_task->m_ref_count  = 2;
+          f.m_task->m_alloc_size = static_cast<int32_t>(alloc_size);
+          f.m_task->m_dep_count  = narg;
+          f.m_task->m_task_type  = task_base::Aggregate;
+
+          // Assign dependences, reference counts were already incremented
+
+          task_base* volatile* const dep = f.m_task->aggregate_dependences();
+
+          for (int i = 0; i < narg; ++i) {
+            dep[i] = arg[i].m_task;
+          }
+
+          Kokkos::memory_fence();
+
+          q->schedule_aggregate(f.m_task);
+          // this when_all may be processed at any moment
+        }
+      }
+    }
+
+    return f;
+  }
+
+  template <class F>
+  KOKKOS_FUNCTION BasicFuture<void, scheduler_type> when_all(int narg,
+                                                             F const func) {
+    using input_type = decltype(func(0));
+
+    static_assert(is_future<input_type>::value,
+                  "Functor must return a Kokkos::Future");
+
+    future_type<void> f;
+
+    if (0 == narg) return f;
+
+    size_t const alloc_size = m_queue->when_all_allocation_size(narg);
+
+    f.m_task = reinterpret_cast<task_base*>(m_queue->allocate(alloc_size));
+
+    if (f.m_task) {
+      // Reference count starts at two:
+      // +1 to match decrement when task completes
+      // +1 for the future
+
+      new (f.m_task) task_base();
+      // f.m_scheduler = *this;
+
+      // f.m_task->m_scheduler = &f.m_scheduler;
+      f.m_task->m_queue      = m_queue;
+      f.m_task->m_ref_count  = 2;
+      f.m_task->m_alloc_size = static_cast<int32_t>(alloc_size);
+      f.m_task->m_dep_count  = narg;
+      f.m_task->m_task_type  = task_base::Aggregate;
+      // f.m_task->m_apply = nullptr;
+      // f.m_task->m_destroy = nullptr;
+
+      // Assign dependences, reference counts were already incremented
+
+      task_base* volatile* const dep = f.m_task->aggregate_dependences();
+
+      for (int i = 0; i < narg; ++i) {
+        const input_type arg_f = func(i);
+        if (nullptr != arg_f.m_task) {
+          // Not scheduled, so task scheduler is not yet set
+          // if ( m_queue != static_cast< BasicTaskScheduler const * >(
+          // arg_f.m_task->m_scheduler )->m_queue ) {
+          //  Kokkos::abort("Kokkos when_all Futures must be in the same
+          //  scheduler" );
+          //}
+          // Increment reference count to track subsequent assignment.
+          // This increment likely has to be SeqCst
+          Kokkos::Impl::desul_atomic_inc(&(arg_f.m_task->m_ref_count),
+                                         Kokkos::Impl::MemoryOrderSeqCst(),
+                                         Kokkos::Impl::MemoryScopeDevice());
+          dep[i] = arg_f.m_task;
+        }
+      }
+
+      Kokkos::memory_fence();
+
+      m_queue->schedule_aggregate(f.m_task);
+      // this when_all may be processed at any moment
+    }
+    return f;
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  int allocation_capacity() const noexcept {
+    return m_queue->m_memory.capacity();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int allocated_task_count() const noexcept { return m_queue->m_count_alloc; }
+
+  KOKKOS_INLINE_FUNCTION
+  int allocated_task_count_max() const noexcept { return m_queue->m_max_alloc; }
+
+  KOKKOS_INLINE_FUNCTION
+  long allocated_task_count_accum() const noexcept {
+    return m_queue->m_accum_alloc;
+  }
+
+  //----------------------------------------
+
+  template <class S, class Q>
+  friend void wait(Kokkos::BasicTaskScheduler<S, Q> const&);
+};
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+//----------------------------------------------------------------------------
+// Construct a TaskTeam execution policy
+
+template <class T, class Scheduler>
+Impl::TaskPolicyWithPredecessor<Impl::TaskType::TaskTeam,
+                                Kokkos::BasicFuture<T, Scheduler>>
+    KOKKOS_INLINE_FUNCTION
+    TaskTeam(Kokkos::BasicFuture<T, Scheduler> arg_future,
+             TaskPriority arg_priority = TaskPriority::Regular) {
+  return {std::move(arg_future), arg_priority};
+}
+
+template <class Scheduler>
+Impl::TaskPolicyWithScheduler<Impl::TaskType::TaskTeam, Scheduler>
+    KOKKOS_INLINE_FUNCTION TaskTeam(
+        Scheduler arg_scheduler,
+        std::enable_if_t<Kokkos::is_scheduler<Scheduler>::value, TaskPriority>
+            arg_priority = TaskPriority::Regular) {
+  return {std::move(arg_scheduler), arg_priority};
+}
+
+template <class Scheduler, class PredecessorFuture>
+Impl::TaskPolicyWithScheduler<Kokkos::Impl::TaskType::TaskTeam, Scheduler,
+                              PredecessorFuture>
+    KOKKOS_INLINE_FUNCTION
+    TaskTeam(Scheduler arg_scheduler, PredecessorFuture arg_future,
+             std::enable_if_t<Kokkos::is_scheduler<Scheduler>::value &&
+                                  Kokkos::is_future<PredecessorFuture>::value,
+                              TaskPriority>
+                 arg_priority = TaskPriority::Regular) {
+  static_assert(std::is_same<typename PredecessorFuture::scheduler_type,
+                             Scheduler>::value,
+                "Can't create a task policy from a scheduler and a future from "
+                "a different scheduler");
+
+  return {std::move(arg_scheduler), std::move(arg_future), arg_priority};
+}
+
+// Construct a TaskSingle execution policy
+
+template <class T, class Scheduler>
+Impl::TaskPolicyWithPredecessor<Impl::TaskType::TaskSingle,
+                                Kokkos::BasicFuture<T, Scheduler>>
+    KOKKOS_INLINE_FUNCTION
+    TaskSingle(Kokkos::BasicFuture<T, Scheduler> arg_future,
+               TaskPriority arg_priority = TaskPriority::Regular) {
+  return {std::move(arg_future), arg_priority};
+}
+
+template <class Scheduler>
+Impl::TaskPolicyWithScheduler<Impl::TaskType::TaskSingle, Scheduler>
+    KOKKOS_INLINE_FUNCTION TaskSingle(
+        Scheduler arg_scheduler,
+        std::enable_if_t<Kokkos::is_scheduler<Scheduler>::value, TaskPriority>
+            arg_priority = TaskPriority::Regular) {
+  return {std::move(arg_scheduler), arg_priority};
+}
+
+template <class Scheduler, class PredecessorFuture>
+Impl::TaskPolicyWithScheduler<Kokkos::Impl::TaskType::TaskSingle, Scheduler,
+                              PredecessorFuture>
+    KOKKOS_INLINE_FUNCTION
+    TaskSingle(Scheduler arg_scheduler, PredecessorFuture arg_future,
+               std::enable_if_t<Kokkos::is_scheduler<Scheduler>::value &&
+                                    Kokkos::is_future<PredecessorFuture>::value,
+                                TaskPriority>
+                   arg_priority = TaskPriority::Regular) {
+  static_assert(std::is_same<typename PredecessorFuture::scheduler_type,
+                             Scheduler>::value,
+                "Can't create a task policy from a scheduler and a future from "
+                "a different scheduler");
+
+  return {std::move(arg_scheduler), std::move(arg_future), arg_priority};
+}
+
+//----------------------------------------------------------------------------
+
+/**\brief  A host control thread spawns a task with options
+ *
+ *  1) Team or Serial
+ *  2) With scheduler or dependence
+ *  3) High, Normal, or Low priority
+ */
+template <int TaskEnum, typename Scheduler, typename DepFutureType,
+          typename FunctorType>
+typename Scheduler::template future_type_for_functor<std::decay_t<FunctorType>>
+host_spawn(Impl::TaskPolicyWithScheduler<TaskEnum, Scheduler, DepFutureType>
+               arg_policy,
+           FunctorType&& arg_functor) {
+  using scheduler_type = Scheduler;
+  using task_type =
+      typename scheduler_type::template runnable_task_type<FunctorType>;
+
+  static_assert(TaskEnum == Impl::TaskType::TaskTeam ||
+                    TaskEnum == Impl::TaskType::TaskSingle,
+                "Kokkos host_spawn requires TaskTeam or TaskSingle");
+
+  // May be spawning a Cuda task, must use the specialization
+  // to query on-device function pointer.
+  typename task_type::function_type ptr;
+  typename task_type::destroy_type dtor;
+  Kokkos::Impl::TaskQueueSpecialization<
+      scheduler_type>::template get_function_pointer<task_type>(ptr, dtor);
+
+  return scheduler_type::spawn(std::move(arg_policy), ptr, dtor,
+                               std::forward<FunctorType>(arg_functor));
+}
+
+/**\brief  A task spawns a task with options
+ *
+ *  1) Team or Serial
+ *  2) With scheduler or dependence
+ *  3) High, Normal, or Low priority
+ */
+template <int TaskEnum, typename Scheduler, typename DepFutureType,
+          typename FunctorType>
+typename Scheduler::template future_type_for_functor<std::decay_t<FunctorType>>
+    KOKKOS_INLINE_FUNCTION
+    task_spawn(Impl::TaskPolicyWithScheduler<TaskEnum, Scheduler, DepFutureType>
+                   arg_policy,
+               FunctorType&& arg_functor) {
+  using scheduler_type = Scheduler;
+
+  using task_type =
+      typename scheduler_type::template runnable_task_type<FunctorType>;
+
+  static_assert(TaskEnum == Impl::TaskType::TaskTeam ||
+                    TaskEnum == Impl::TaskType::TaskSingle,
+                "Kokkos task_spawn requires TaskTeam or TaskSingle");
+
+  typename task_type::function_type const ptr = task_type::apply;
+  typename task_type::destroy_type const dtor = task_type::destroy;
+
+  return scheduler_type::spawn(std::move(arg_policy), ptr, dtor,
+                               std::forward<FunctorType>(arg_functor));
+}
+
+/**\brief  A task respawns itself with options
+ *
+ *  1) With scheduler or dependence
+ *  2) High, Normal, or Low priority
+ */
+template <typename FunctorType, typename T>
+void KOKKOS_INLINE_FUNCTION
+respawn(FunctorType* arg_self, T const& arg,
+        TaskPriority const& arg_priority = TaskPriority::Regular) {
+  static_assert(Kokkos::is_future<T>::value || Kokkos::is_scheduler<T>::value,
+                "Kokkos respawn argument must be Future or TaskScheduler");
+
+  T::scheduler_type::respawn(arg_self, arg, arg_priority);
+}
+
+//----------------------------------------------------------------------------
+
+// template<typename ValueType, typename Scheduler>
+// KOKKOS_INLINE_FUNCTION
+// BasicFuture<void, Scheduler>
+// when_all(BasicFuture<ValueType, Scheduler> const arg[], int narg)
+//{
+//  return BasicFuture<void, Scheduler>::scheduler_type::when_all(arg, narg);
+//}
+
+//----------------------------------------------------------------------------
+// Wait for all runnable tasks to complete
+
+template <class ExecSpace, class QueueType>
+inline void wait(BasicTaskScheduler<ExecSpace, QueueType> const& scheduler) {
+  using scheduler_type = BasicTaskScheduler<ExecSpace, QueueType>;
+  scheduler_type::specialization::execute(scheduler);
+  // scheduler.m_queue->execute();
+}
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+////////////////////////////////////////////////////////////////////////////////
+// END OLD CODE
+////////////////////////////////////////////////////////////////////////////////
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_TASKSCHEDULER_HPP */
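
A hedged sketch of the host-side flow declared above: build a scheduler over a memory pool, host_spawn() a single task, then wait(). The functor's value_type/operator() shape follows the tasking interface in this header; the pool capacity and block sizes are illustrative only:

#include <Kokkos_Core.hpp>

// Sketch (requires KOKKOS_ENABLE_TASKDAG): spawning one single-thread task.
template <class Scheduler>
struct HelloTask {
  using value_type = int;  // result delivered through the future

  template <class TeamMember>
  KOKKOS_INLINE_FUNCTION void operator()(TeamMember& /*member*/, int& result) {
    result = 42;
  }
};

template <class ExecSpace>
int run_one_task() {
  using scheduler_type = Kokkos::TaskScheduler<ExecSpace>;
  using memory_space   = typename scheduler_type::memory_space;

  // Pool sizing below is illustrative, not tuned.
  scheduler_type scheduler(memory_space(), /*mempool_capacity=*/1u << 20,
                           /*min_block_size=*/1u << 6,
                           /*max_block_size=*/1u << 10,
                           /*superblock_size=*/1u << 12);

  auto fut = Kokkos::host_spawn(Kokkos::TaskSingle(scheduler),
                                HelloTask<scheduler_type>{});
  Kokkos::wait(scheduler);  // drain all runnable tasks
  return fut.get();
}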
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_TaskScheduler_fwd.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_TaskScheduler_fwd.hpp
new file mode 100644 (file)
index 0000000..075a9ba
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_TASKSCHEDULER_FWD_HPP
+#define KOKKOS_TASKSCHEDULER_FWD_HPP
+
+//----------------------------------------------------------------------------
+
+#include <cstddef>
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core_fwd.hpp>
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+// Forward declarations used in Impl::TaskQueue
+
+template <typename ValueType, typename Scheduler>
+class BasicFuture;
+
+template <class Space, class Queue>
+class SimpleTaskScheduler;
+
+template <class Space, class Queue>
+class BasicTaskScheduler;
+
+template <typename Space>
+struct is_scheduler : public std::false_type {};
+
+template <class Space, class Queue>
+struct is_scheduler<BasicTaskScheduler<Space, Queue>> : public std::true_type {
+};
+
+template <class Space, class Queue>
+struct is_scheduler<SimpleTaskScheduler<Space, Queue>> : public std::true_type {
+};
+
+enum class TaskPriority : int { High = 0, Regular = 1, Low = 2 };
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+template <class Device>
+class MemoryPool;
+
+namespace Impl {
+
+template <class TaskQueueTraits>
+class TaskNode;
+
+class TaskBase;
+
+/**\brief  Implementation data for task data management, access, and execution.
+ *  (Deprecated)
+ *  CRTP inheritance structure to allow static_cast from the
+ *  task root type and a task's FunctorType.
+ *
+ *    TaskBase< Space , ResultType , FunctorType >
+ *      : TaskBase< Space , ResultType , void >
+ *      , FunctorType
+ *      { ... };
+ *
+ *    TaskBase< Space , ResultType , void >
+ *      : TaskBase< Space , void , void >
+ *      { ... };
+ */
+template <typename Space, typename ResultType, typename FunctorType>
+class Task;
+
+class TaskQueueBase;
+
+template <typename Space, typename MemorySpace>
+class TaskQueue;
+
+template <typename ExecSpace, typename MemorySpace>
+class TaskQueueMultiple;
+
+template <typename ExecSpace, typename MemSpace, typename TaskQueueTraits,
+          class MemoryPool =
+              Kokkos::MemoryPool<Kokkos::Device<ExecSpace, MemSpace>>>
+class SingleTaskQueue;
+
+template <typename ExecSpace, typename MemSpace, typename TaskQueueTraits,
+          class MemoryPool>
+class MultipleTaskQueue;
+
+struct TaskQueueTraitsLockBased;
+
+template <size_t CircularBufferSize = 64>
+struct TaskQueueTraitsChaseLev;
+
+template <typename ResultType>
+struct TaskResult;
+
+struct TaskSchedulerBase;
+
+template <class ExecSpace>
+struct default_tasking_memory_space_for_execution_space {
+  using type = typename ExecSpace::memory_space;
+};
+
+#if defined(KOKKOS_ENABLE_CUDA)
+template <>
+struct default_tasking_memory_space_for_execution_space<Kokkos::Cuda> {
+  using type = Kokkos::CudaUVMSpace;
+};
+#endif
+
+template <class ExecSpace>
+using default_tasking_memory_space_for_execution_space_t =
+    typename default_tasking_memory_space_for_execution_space<ExecSpace>::type;
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+template <typename Space>
+using DeprecatedTaskScheduler = BasicTaskScheduler<
+    Space,
+    Impl::TaskQueue<
+        Space,
+        Impl::default_tasking_memory_space_for_execution_space_t<Space>>>;
+
+template <typename Space>
+using DeprecatedTaskSchedulerMultiple = BasicTaskScheduler<
+    Space,
+    Impl::TaskQueueMultiple<
+        Space,
+        Impl::default_tasking_memory_space_for_execution_space_t<Space>>>;
+
+template <typename Space>
+using TaskScheduler = SimpleTaskScheduler<
+    Space,
+    Impl::SingleTaskQueue<
+        Space, Impl::default_tasking_memory_space_for_execution_space_t<Space>,
+        Impl::TaskQueueTraitsLockBased>>;
+
+template <typename Space>
+using TaskSchedulerMultiple = SimpleTaskScheduler<
+    Space,
+    Impl::MultipleTaskQueue<
+        Space, Impl::default_tasking_memory_space_for_execution_space_t<Space>,
+        Impl::TaskQueueTraitsLockBased,
+        Kokkos::MemoryPool<Kokkos::Device<
+            Space,
+            Impl::default_tasking_memory_space_for_execution_space_t<Space>>>>>;
+
+template <typename Space>
+using ChaseLevTaskScheduler = SimpleTaskScheduler<
+    Space,
+    Impl::MultipleTaskQueue<
+        Space, Impl::default_tasking_memory_space_for_execution_space_t<Space>,
+        Impl::TaskQueueTraitsChaseLev<>,
+        Kokkos::MemoryPool<Kokkos::Device<
+            Space,
+            Impl::default_tasking_memory_space_for_execution_space_t<Space>>>>>;
+
+template <class Space, class QueueType>
+void wait(BasicTaskScheduler<Space, QueueType> const&);
+
+namespace Impl {
+
+struct TaskSchedulerBase {};
+
+class TaskQueueBase {};
+
+template <typename Scheduler, typename EnableIfConstraint = void>
+class TaskQueueSpecializationConstrained {};
+
+template <typename Scheduler>
+struct TaskQueueSpecialization : TaskQueueSpecializationConstrained<Scheduler> {
+};
+
+template <int, typename>
+struct TaskPolicyData;
+
+}  // end namespace Impl
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_TASKSCHEDULER_FWD_HPP */
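
The scheduler aliases above differ only in queue strategy, so choosing one is a type-level decision; roughly, TaskScheduler uses a single lock-based queue, TaskSchedulerMultiple uses one queue per team, and ChaseLevTaskScheduler uses lock-free Chase-Lev work-stealing deques. A small sketch of selecting one (the characterizations in the comments are a rough reading of the queue types above):

#include <Kokkos_Core.hpp>

// Sketch (requires KOKKOS_ENABLE_TASKDAG): the aliases only swap queue types.
using SingleQueueSched = Kokkos::TaskScheduler<Kokkos::DefaultExecutionSpace>;
using MultiQueueSched =
    Kokkos::TaskSchedulerMultiple<Kokkos::DefaultExecutionSpace>;
using WorkStealingSched =
    Kokkos::ChaseLevTaskScheduler<Kokkos::DefaultExecutionSpace>;

static_assert(Kokkos::is_scheduler<SingleQueueSched>::value,
              "detected via the is_scheduler trait declared above");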
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Threads.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Threads.hpp
new file mode 100644 (file)
index 0000000..e6dcad5
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_THREADS_HPP
+#define KOKKOS_THREADS_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_THREADS)
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <cstddef>
+#include <iosfwd>
+#include <Kokkos_HostSpace.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_MemoryTraits.hpp>
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <impl/Kokkos_InitializationSettings.hpp>
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+class ThreadsExec;
+enum class fence_is_static { yes, no };
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+
+/** \brief  Execution space for a pool of C++11 threads on a CPU. */
+class Threads {
+ public:
+  //! \name Type declarations that all Kokkos devices must provide.
+  //@{
+  //! Tag this class as a kokkos execution space
+  using execution_space = Threads;
+  using memory_space    = Kokkos::HostSpace;
+
+  //! This execution space's preferred device_type
+  using device_type = Kokkos::Device<execution_space, memory_space>;
+
+  using array_layout = Kokkos::LayoutRight;
+  using size_type    = memory_space::size_type;
+
+  using scratch_memory_space = ScratchMemorySpace<Threads>;
+
+  //@}
+  /*------------------------------------------------------------------------*/
+  //! \name Static functions that all Kokkos devices must implement.
+  //@{
+
+  /// \brief True if and only if this method is being called in a
+  ///   thread-parallel function.
+  static int in_parallel();
+
+  /// \brief Print configuration information to the given output stream.
+  void print_configuration(std::ostream& os, bool verbose = false) const;
+
+  /// \brief Wait until all dispatched functors complete.
+  ///
+  /// The parallel_for or parallel_reduce dispatch of a functor may
+  /// return asynchronously, before the functor completes.  This
+  /// method does not return until all dispatched functors on this
+  /// device have completed.
+  static void impl_static_fence(const std::string& name);
+
+  void fence(const std::string& name =
+                 "Kokkos::Threads::fence: Unnamed Instance Fence") const;
+
+  /** \brief  Return the maximum amount of concurrency.  */
+  static int concurrency();
+
+  /// \brief Free any resources being consumed by the device.
+  ///
+  /// For the Threads device, this terminates spawned worker threads.
+  static void impl_finalize();
+
+  //@}
+  /*------------------------------------------------------------------------*/
+  /*------------------------------------------------------------------------*/
+  //! \name Space-specific functions
+  //@{
+
+  static void impl_initialize(InitializationSettings const&);
+
+  static int impl_is_initialized();
+
+  static Threads& impl_instance(int = 0);
+
+  //----------------------------------------
+
+  static int impl_thread_pool_size(int depth = 0);
+
+  static int impl_thread_pool_rank_host();
+
+  static KOKKOS_FUNCTION int impl_thread_pool_rank() {
+    KOKKOS_IF_ON_HOST((return impl_thread_pool_rank_host();))
+
+    KOKKOS_IF_ON_DEVICE((return 0;))
+  }
+
+  inline static unsigned impl_max_hardware_threads() {
+    return impl_thread_pool_size(0);
+  }
+  KOKKOS_INLINE_FUNCTION static unsigned impl_hardware_thread_id() {
+    return impl_thread_pool_rank();
+  }
+
+  uint32_t impl_instance_id() const noexcept { return 1; }
+
+  static const char* name();
+  //@}
+  //----------------------------------------
+};
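+
+// A minimal usage sketch (not part of this header; assumes Kokkos was built
+// with the Threads backend enabled and is initialized normally):
+//
+//   #include <Kokkos_Core.hpp>
+//   int main(int argc, char* argv[]) {
+//     Kokkos::initialize(argc, argv);
+//     {
+//       Kokkos::parallel_for(
+//           Kokkos::RangePolicy<Kokkos::Threads>(0, 100),
+//           KOKKOS_LAMBDA(int i) { (void)i; /* ... */ });
+//       Kokkos::Threads().fence("example fence");
+//     }
+//     Kokkos::finalize();
+//   }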
+
+namespace Tools {
+namespace Experimental {
+template <>
+struct DeviceTypeTraits<Threads> {
+  static constexpr DeviceType id = DeviceType::Threads;
+  static int device_id(const Threads&) { return 0; }
+};
+}  // namespace Experimental
+}  // namespace Tools
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+struct MemorySpaceAccess<Kokkos::Threads::memory_space,
+                         Kokkos::Threads::scratch_memory_space> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = false };
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+
+#include <Kokkos_ExecPolicy.hpp>
+#include <Kokkos_Parallel.hpp>
+#include <Threads/Kokkos_ThreadsExec.hpp>
+#include <Threads/Kokkos_ThreadsTeam.hpp>
+#include <Threads/Kokkos_Threads_Parallel_Range.hpp>
+#include <Threads/Kokkos_Threads_Parallel_MDRange.hpp>
+#include <Threads/Kokkos_Threads_Parallel_Team.hpp>
+#include <Threads/Kokkos_Threads_UniqueToken.hpp>
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_THREADS ) */
+#endif /* #ifndef KOKKOS_THREADS_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Timer.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Timer.hpp
new file mode 100644 (file)
index 0000000..38309b0
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_TIMER_HPP
+#define KOKKOS_TIMER_HPP
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_TIMER
+#endif
+
+#include <Kokkos_Macros.hpp>
+// gcc 10.3.0 with CUDA doesn't support std::chrono,
+// see https://github.com/kokkos/kokkos/issues/4334
+#if defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU == 1030) && \
+    defined(KOKKOS_COMPILER_NVCC)
+#include <sys/time.h>
+#else
+#include <chrono>
+#endif
+
+namespace Kokkos {
+
+/** \brief  Time since construction */
+
+#if defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU == 1030) && \
+    defined(KOKKOS_COMPILER_NVCC)
+class Timer {
+ private:
+  struct timeval m_old;
+
+ public:
+  inline void reset() { gettimeofday(&m_old, nullptr); }
+
+  inline ~Timer() = default;
+
+  inline Timer() { reset(); }
+
+  Timer(const Timer&) = delete;
+  Timer& operator=(const Timer&) = delete;
+
+  inline double seconds() const {
+    struct timeval m_new;
+
+    gettimeofday(&m_new, nullptr);
+
+    return ((double)(m_new.tv_sec - m_old.tv_sec)) +
+           ((double)(m_new.tv_usec - m_old.tv_usec) * 1.0e-6);
+  }
+};
+#else
+class Timer {
+ private:
+  std::chrono::high_resolution_clock::time_point m_old;
+
+ public:
+  inline void reset() { m_old = std::chrono::high_resolution_clock::now(); }
+
+  inline ~Timer() = default;
+
+  inline Timer() { reset(); }
+
+  // Deleted to make Timer non-copyable, matching the fallback implementation.
+  Timer(const Timer&) = delete;
+  Timer& operator=(const Timer&) = delete;
+
+  inline double seconds() const {
+    std::chrono::high_resolution_clock::time_point m_new =
+        std::chrono::high_resolution_clock::now();
+    return std::chrono::duration_cast<std::chrono::duration<double> >(m_new -
+                                                                      m_old)
+        .count();
+  }
+};
+#endif
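+
+// A minimal usage sketch (do_work() is a hypothetical user function):
+//
+//   Kokkos::Timer timer;               // clock starts at construction
+//   do_work();
+//   double elapsed = timer.seconds();  // seconds since construction/reset
+//   timer.reset();                     // restart the clock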
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_TIMER
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE
+#undef KOKKOS_IMPL_PUBLIC_INCLUDE_NOTDEFINED_TIMER
+#endif
+#endif /* #ifndef KOKKOS_TIMER_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Tuners.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Tuners.hpp
new file mode 100644 (file)
index 0000000..dba6027
--- /dev/null
@@ -0,0 +1,666 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_KOKKOS_TUNERS_HPP
+#define KOKKOS_KOKKOS_TUNERS_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_ExecPolicy.hpp>
+#include <KokkosExp_MDRangePolicy.hpp>
+#include <impl/Kokkos_Profiling_Interface.hpp>
+
+#include <array>
+#include <utility>
+#include <tuple>
+#include <string>
+#include <vector>
+#include <map>
+#include <cassert>
+
+namespace Kokkos {
+namespace Tools {
+
+namespace Experimental {
+
+// forward declarations
+SetOrRange make_candidate_set(size_t size, int64_t* data);
+bool have_tuning_tool();
+size_t declare_output_type(const std::string&,
+                           Kokkos::Tools::Experimental::VariableInfo);
+void request_output_values(size_t, size_t,
+                           Kokkos::Tools::Experimental::VariableValue*);
+VariableValue make_variable_value(size_t, int64_t);
+VariableValue make_variable_value(size_t, double);
+SetOrRange make_candidate_range(double lower, double upper, double step,
+                                bool openLower, bool openUpper);
+size_t get_new_context_id();
+void begin_context(size_t context_id);
+void end_context(size_t context_id);
+namespace Impl {
+
+/** We take in search space descriptions as nested maps,
+ * which aren't efficient to iterate across by index.
+ * A ValueHierarchyNode mirrors such a nested map, but is
+ * better suited for index-based lookup.
+ */
+template <typename ValueType, typename ContainedType>
+struct ValueHierarchyNode;
+
+template <typename ValueType, typename ContainedType>
+struct ValueHierarchyNode {
+  std::vector<ValueType> root_values;
+  std::vector<ContainedType> sub_values;
+  void add_root_value(const ValueType& in) noexcept {
+    root_values.push_back(in);
+  }
+  void add_sub_container(const ContainedType& in) { sub_values.push_back(in); }
+  const ValueType& get_root_value(const size_t index) const {
+    return root_values[index];
+  }
+  const ContainedType& get_sub_value(const size_t index) const {
+    return sub_values[index];
+  }
+};
+
+template <typename ValueType>
+struct ValueHierarchyNode<ValueType, void> {
+  std::vector<ValueType> root_values;
+  explicit ValueHierarchyNode(std::vector<ValueType> rv)
+      : root_values(std::move(rv)) {}
+  void add_root_value(const ValueType& in) noexcept {
+    root_values.push_back(in);
+  }
+  const ValueType& get_root_value(const size_t index) const {
+    return root_values[index];
+  }
+};
+
+/** For a given nested map type, we need a way to
+ * declare the equivalent ValueHierarchyNode
+ * structure
+ */
+
+template <class NestedMap>
+struct MapTypeConverter;
+
+// Vectors are our lowest-level, no nested values
+template <class T>
+struct MapTypeConverter<std::vector<T>> {
+  using type = ValueHierarchyNode<T, void>;
+};
+
+// Maps contain both the "root" types and sub-vectors
+template <class K, class V>
+struct MapTypeConverter<std::map<K, V>> {
+  using type = ValueHierarchyNode<K, typename MapTypeConverter<V>::type>;
+};
+
+/**
+ * We also need to be able to construct a ValueHierarchyNode set from a
+ * map
+ */
+
+template <class NestedMap>
+struct ValueHierarchyConstructor;
+
+// Vectors are our lowest-level, no nested values. Just fill in the fundamental
+// values
+template <class T>
+struct ValueHierarchyConstructor<std::vector<T>> {
+  using return_type = typename MapTypeConverter<std::vector<T>>::type;
+  static return_type build(const std::vector<T>& in) { return return_type{in}; }
+};
+
+// For maps, we need to fill in the fundamental values, and construct child
+// nodes
+template <class K, class V>
+struct ValueHierarchyConstructor<std::map<K, V>> {
+  using return_type = typename MapTypeConverter<std::map<K, V>>::type;
+  static return_type build(const std::map<K, V>& in) {
+    return_type node_to_build;
+    for (auto& entry : in) {
+      node_to_build.add_root_value(entry.first);
+      node_to_build.add_sub_container(
+          ValueHierarchyConstructor<V>::build(entry.second));
+    }
+    return node_to_build;
+  }
+};
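+
+// For illustration (hypothetical input, not part of the library): a rank-2
+// space given as a nested map converts as follows.
+//
+//   using Space = std::map<int, std::vector<int>>;
+//   Space in{{1, {10, 20}}, {2, {30}}};
+//   auto node = ValueHierarchyConstructor<Space>::build(in);
+//   // node.get_root_value(0) == 1, node.get_root_value(1) == 2
+//   // node.get_sub_value(0).get_root_value(1) == 20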
+
+/**
+ * We're going to be declaring a sparse multidimensional
+ * tuning space as a set of nested maps. The innermost level
+ * will be a vector. The dimensionality of such a space is the number of
+ * maps + 1.
+ *
+ * The following templates implement such logic recursively
+ */
+template <class InspectForDepth>
+struct get_space_dimensionality;
+
+// The dimensionality of a vector is 1
+template <class T>
+struct get_space_dimensionality<std::vector<T>> {
+  static constexpr int value = 1;
+};
+
+// The dimensionality of a map is 1 (the map) plus the dimensionality
+// of the map's value type
+template <class K, class V>
+struct get_space_dimensionality<std::map<K, V>> {
+  static constexpr int value = 1 + get_space_dimensionality<V>::value;
+};
+
+template <class T, int N>
+struct n_dimensional_sparse_structure;
+
+template <class T>
+struct n_dimensional_sparse_structure<T, 1> {
+  using type = std::vector<T>;
+};
+
+template <class T, int N>
+struct n_dimensional_sparse_structure {
+  using type =
+      std::map<T, typename n_dimensional_sparse_structure<T, N - 1>::type>;
+};
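+
+// A sanity check of the two traits above, for illustration:
+//
+//   static_assert(get_space_dimensionality<
+//                     std::map<int, std::vector<int>>>::value == 2);
+//   static_assert(std::is_same_v<
+//       n_dimensional_sparse_structure<int, 2>::type,
+//       std::map<int, std::vector<int>>>);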
+
+/**
+ * This is the ugly part of this implementation: mapping a set of doubles in
+ * [0.0,1.0) into a point in this multidimensional space. We're going to
+ * implement this concept recursively, building up a tuple at each level.
+ */
+
+// First, a helper to get the value in one dimension
+template <class Container>
+struct DimensionValueExtractor;
+
+// At any given level, just return your value at that level
+template <class RootType, class Subtype>
+struct DimensionValueExtractor<ValueHierarchyNode<RootType, Subtype>> {
+  static RootType get(const ValueHierarchyNode<RootType, Subtype>& dimension,
+                      double fraction_to_traverse) {
+    size_t index = dimension.root_values.size() * fraction_to_traverse;
+    return dimension.get_root_value(index);
+  }
+};
+
+/** Now we're going to do the full "get a point in the space".
+ * At a root level, we'll take in a ValueHierarchyNode and a set of doubles
+ * representing the value in [0.0,1.0) we want to pick
+ */
+
+// At the bottom level, we have one double and a base-level ValueHierarchyNode
+
+template <class HierarchyNode, class... InterpolationIndices>
+struct GetMultidimensionalPoint;
+
+template <class ValueType>
+struct GetMultidimensionalPoint<ValueHierarchyNode<ValueType, void>, double> {
+  using node_type   = ValueHierarchyNode<ValueType, void>;
+  using return_type = std::tuple<ValueType>;
+  static return_type build(const node_type& in, double index) {
+    return std::make_tuple(DimensionValueExtractor<node_type>::get(in, index));
+  }
+};
+
+// At levels above the bottom, we tuple_cat the result of our child on the end
+// of our own tuple
+template <class ValueType, class Subtype, class... Indices>
+struct GetMultidimensionalPoint<ValueHierarchyNode<ValueType, Subtype>, double,
+                                Indices...> {
+  using node_type = ValueHierarchyNode<ValueType, Subtype>;
+  using sub_tuple =
+      typename GetMultidimensionalPoint<Subtype, Indices...>::return_type;
+  using return_type = decltype(std::tuple_cat(
+      std::declval<std::tuple<ValueType>>(), std::declval<sub_tuple>()));
+  static return_type build(const node_type& in, double fraction_to_traverse,
+                           Indices... indices) {
+    size_t index         = in.sub_values.size() * fraction_to_traverse;
+    auto dimension_value = std::make_tuple(
+        DimensionValueExtractor<node_type>::get(in, fraction_to_traverse));
+    return std::tuple_cat(dimension_value,
+                          GetMultidimensionalPoint<Subtype, Indices...>::build(
+                              in.get_sub_value(index), indices...));
+  }
+};
+
+template <typename PointType, class ArrayType, size_t... Is>
+auto get_point_helper(const PointType& in, const ArrayType& indices,
+                      std::index_sequence<Is...>) {
+  using helper = GetMultidimensionalPoint<
+      PointType,
+      decltype(std::get<Is>(std::declval<ArrayType>()).value.double_value)...>;
+  return helper::build(in, std::get<Is>(indices).value.double_value...);
+}
+
+template <typename PointType, typename ArrayType>
+struct GetPoint;
+
+template <typename PointType, size_t X>
+struct GetPoint<PointType,
+                std::array<Kokkos::Tools::Experimental::VariableValue, X>> {
+  using index_set_type =
+      std::array<Kokkos::Tools::Experimental::VariableValue, X>;
+  static auto build(const PointType& in, const index_set_type& indices) {
+    return get_point_helper(in, indices, std::make_index_sequence<X>{});
+  }
+};
+
+template <typename PointType, typename ArrayType>
+auto get_point(const PointType& point, const ArrayType& indices) {
+  return GetPoint<PointType, ArrayType>::build(point, indices);
+}
+
+}  // namespace Impl
+
+template <template <class...> class Container, size_t MaxDimensionSize = 100,
+          class... TemplateArguments>
+class MultidimensionalSparseTuningProblem {
+ public:
+  using ProblemSpaceInput = Container<TemplateArguments...>;
+  static constexpr int space_dimensionality =
+      Impl::get_space_dimensionality<ProblemSpaceInput>::value;
+  static constexpr size_t max_space_dimension_size = MaxDimensionSize;
+  static constexpr double tuning_min               = 0.0;
+  static constexpr double tuning_max               = 0.999;
+
+  // Not declared as static constexpr to work around the following compiler bug
+  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96862
+  // where a floating-point expression cannot be constexpr under -frounding-math
+  double tuning_step = tuning_max / max_space_dimension_size;
+
+  using StoredProblemSpace =
+      typename Impl::MapTypeConverter<ProblemSpaceInput>::type;
+  using HierarchyConstructor =
+      typename Impl::ValueHierarchyConstructor<Container<TemplateArguments...>>;
+
+  using ValueArray = std::array<Kokkos::Tools::Experimental::VariableValue,
+                                space_dimensionality>;
+  template <class Key, class Value>
+  using extended_map = std::map<Key, Value>;
+  template <typename Key>
+  using extended_problem =
+      MultidimensionalSparseTuningProblem<extended_map, MaxDimensionSize, Key,
+                                          ProblemSpaceInput>;
+  template <typename Key, typename Value>
+  using ExtendedProblemSpace =
+      typename Impl::MapTypeConverter<extended_map<Key, Value>>::type;
+
+  template <typename Key>
+  auto extend(const std::string& axis_name,
+              const std::vector<Key>& new_tuning_axis) const
+      -> extended_problem<Key> {
+    ExtendedProblemSpace<Key, ProblemSpaceInput> extended_space;
+    for (auto& key : new_tuning_axis) {
+      extended_space.add_root_value(key);
+      extended_space.add_sub_container(m_space);
+    }
+    std::vector<std::string> extended_names;
+    extended_names.reserve(m_variable_names.size() + 1);
+    extended_names.push_back(axis_name);
+    extended_names.insert(extended_names.end(), m_variable_names.begin(),
+                          m_variable_names.end());
+    return extended_problem<Key>(extended_space, extended_names);
+  }
+
+ private:
+  StoredProblemSpace m_space;
+  std::array<size_t, space_dimensionality> variable_ids;
+  std::vector<std::string> m_variable_names;
+  size_t context;
+
+ public:
+  MultidimensionalSparseTuningProblem() = default;
+
+  MultidimensionalSparseTuningProblem(StoredProblemSpace space,
+                                      const std::vector<std::string>& names)
+      : m_space(std::move(space)), m_variable_names(names) {
+    assert(names.size() == space_dimensionality);
+    for (unsigned long x = 0; x < names.size(); ++x) {
+      VariableInfo info;
+      info.type = Kokkos::Tools::Experimental::ValueType::kokkos_value_double;
+      info.category = Kokkos::Tools::Experimental::StatisticalCategory::
+          kokkos_value_interval;
+      info.valueQuantity =
+          Kokkos::Tools::Experimental::CandidateValueType::kokkos_value_range;
+      info.candidates = Kokkos::Tools::Experimental::make_candidate_range(
+          tuning_min, tuning_max, tuning_step, true, true);
+      variable_ids[x] = declare_output_type(names[x], info);
+    }
+  }
+
+  MultidimensionalSparseTuningProblem(ProblemSpaceInput space,
+                                      const std::vector<std::string>& names)
+      : MultidimensionalSparseTuningProblem(HierarchyConstructor::build(space),
+                                            names) {}
+
+  template <typename... Coordinates>
+  auto get_point(Coordinates... coordinates) {
+    using ArrayType = std::array<Kokkos::Tools::Experimental::VariableValue,
+                                 sizeof...(coordinates)>;
+    return Impl::get_point(
+        m_space, ArrayType({Kokkos::Tools::Experimental::make_variable_value(
+                     0, static_cast<double>(coordinates))...}));
+  }
+
+  auto begin() {
+    context = Kokkos::Tools::Experimental::get_new_context_id();
+    ValueArray values;
+    for (int x = 0; x < space_dimensionality; ++x) {
+      values[x] = Kokkos::Tools::Experimental::make_variable_value(
+          variable_ids[x], 0.0);
+    }
+    begin_context(context);
+    request_output_values(context, space_dimensionality, values.data());
+    return Impl::get_point(m_space, values);
+  }
+
+  auto end() { end_context(context); }
+};
+
+template <typename Tuner>
+struct ExtendableTunerMixin {
+  template <typename Key>
+  auto combine(const std::string& axis_name,
+               const std::vector<Key>& new_axis) const {
+    const auto& sub_tuner = static_cast<const Tuner*>(this)->get_tuner();
+    return sub_tuner.extend(axis_name, new_axis);
+  }
+
+  template <typename... Coordinates>
+  auto get_point(Coordinates... coordinates) {
+    const auto& sub_tuner = static_cast<const Tuner*>(this)->get_tuner();
+    return sub_tuner.get_point(coordinates...);
+  }
+};
+
+template <size_t MaxDimensionSize = 100, template <class...> class Container,
+          class... TemplateArguments>
+auto make_multidimensional_sparse_tuning_problem(
+    const Container<TemplateArguments...>& in, std::vector<std::string> names) {
+  return MultidimensionalSparseTuningProblem<Container, MaxDimensionSize,
+                                             TemplateArguments...>(in, names);
+}
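+
+// A rough usage sketch (axis names and values are assumptions for
+// illustration; the returned point is only meaningful when a tuning tool is
+// loaded):
+//
+//   std::map<int64_t, std::vector<int64_t>> space{{1, {1, 2, 4}}, {2, {1, 2}}};
+//   auto problem = make_multidimensional_sparse_tuning_problem<20>(
+//       space, {"vector_length", "team_size"});
+//   auto [vector_length, team_size] = problem.begin();  // request a point
+//   // ... run the code being tuned with this configuration ...
+//   problem.end();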
+
+class TeamSizeTuner : public ExtendableTunerMixin<TeamSizeTuner> {
+ private:
+  using SpaceDescription = std::map<int64_t, std::vector<int64_t>>;
+  using TunerType = decltype(make_multidimensional_sparse_tuning_problem<20>(
+      std::declval<SpaceDescription>(),
+      std::declval<std::vector<std::string>>()));
+  TunerType tuner;
+
+ public:
+  TeamSizeTuner()        = default;
+  TeamSizeTuner& operator=(const TeamSizeTuner& other) = default;
+  TeamSizeTuner(const TeamSizeTuner& other)            = default;
+  TeamSizeTuner& operator=(TeamSizeTuner&& other) = default;
+  TeamSizeTuner(TeamSizeTuner&& other)            = default;
+  template <typename ViableConfigurationCalculator, typename Functor,
+            typename TagType, typename... Properties>
+  TeamSizeTuner(const std::string& name,
+                Kokkos::TeamPolicy<Properties...>& policy,
+                const Functor& functor, const TagType& tag,
+                ViableConfigurationCalculator calc) {
+    using PolicyType           = Kokkos::TeamPolicy<Properties...>;
+    auto initial_vector_length = policy.impl_vector_length();
+    if (initial_vector_length < 1) {
+      policy.impl_set_vector_length(1);
+    }
+    /**
+     * Here we attempt to enumerate all of the possible configurations
+     * to expose to an autotuner. There are three possibilities
+     *
+     * 1) We're tuning both vector length and team size
+     * 2) We're tuning vector length but not team size
+     * 3) We're tuning team size but not vector length
+     *
+     * (In the fourth case where nothing is tuned
+     * this function won't be called)
+     *
+     * The set of valid team sizes is dependent on
+     * a vector length, so this leads to three
+     * algorithms
+     *
+     * 1) Loop over vector lengths to get the set
+     *    of team sizes for each vector length,
+     *    add it all to the set
+     * 2) Loop over vector lengths to see if the
+     *    provided team size is valid for that
+     *    vector length. If so, add it
+     * 3) A special case of (1) in which we only
+     *    have one vector length
+     *
+     */
+    SpaceDescription space_description;
+
+    auto max_vector_length = PolicyType::vector_length_max();
+    std::vector<int64_t> allowed_vector_lengths;
+
+    if (policy.impl_auto_vector_length()) {  // case 1 or 2
+      for (int vector_length = max_vector_length; vector_length >= 1;
+           vector_length /= 2) {
+        policy.impl_set_vector_length(vector_length);
+        /**
+         * Figuring out whether a vector length is valid depends
+         * on whether we're in case 1 (tune everything) or 2 (just tune vector
+         * length)
+         *
+         * If we're tuning everything, all legal vector lengths are valid.
+         * If we're just tuning vector length, we need to check that if we
+         * set this vector length, the team size provided will be valid.
+         *
+         * These are the left and right hand sides of the "or" in this
+         * conditional, respectively.
+         */
+        auto max_team_size = calc.get_max_team_size(policy, functor, tag);
+        if ((policy.impl_auto_team_size()) ||
+            (policy.team_size() <= max_team_size)) {
+          allowed_vector_lengths.push_back(vector_length);
+        }
+      }
+    } else {  // case 3, there's only one vector length to care about
+      allowed_vector_lengths.push_back(policy.impl_vector_length());
+    }
+
+    for (const auto vector_length : allowed_vector_lengths) {
+      std::vector<int64_t> allowed_team_sizes;
+      policy.impl_set_vector_length(vector_length);
+      auto max_team_size = calc.get_max_team_size(policy, functor, tag);
+      if (policy.impl_auto_team_size()) {  // case 1 or 3, try all legal team
+                                           // sizes
+        for (int team_size = max_team_size; team_size >= 1; team_size /= 2) {
+          allowed_team_sizes.push_back(team_size);
+        }
+      } else {  // case 2, just try the provided team size
+        allowed_team_sizes.push_back(policy.team_size());
+      }
+      space_description[vector_length] = allowed_team_sizes;
+    }
+    tuner = make_multidimensional_sparse_tuning_problem<20>(
+        space_description, {std::string(name + "_vector_length"),
+                            std::string(name + "_team_size")});
+    policy.impl_set_vector_length(initial_vector_length);
+  }
+
+  template <typename... Properties>
+  void tune(Kokkos::TeamPolicy<Properties...>& policy) {
+    if (Kokkos::Tools::Experimental::have_tuning_tool()) {
+      auto configuration = tuner.begin();
+      auto team_size     = std::get<1>(configuration);
+      auto vector_length = std::get<0>(configuration);
+      if (vector_length > 0) {
+        policy.impl_set_team_size(team_size);
+        policy.impl_set_vector_length(vector_length);
+      }
+    }
+  }
+  void end() {
+    if (Kokkos::Tools::Experimental::have_tuning_tool()) {
+      tuner.end();
+    }
+  }
+
+  TunerType get_tuner() const { return tuner; }
+};
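+
+// A rough sketch of how a dispatch layer is expected to drive this tuner
+// (`functor`, `tag`, and `calc` are assumptions for illustration; `calc`
+// must provide get_max_team_size(policy, functor, tag)):
+//
+//   Kokkos::TeamPolicy<> policy(league_size, Kokkos::AUTO);
+//   TeamSizeTuner tuner("my_kernel", policy, functor, tag, calc);
+//   tuner.tune(policy);  // may overwrite team size / vector length
+//   // ... dispatch the kernel with the (possibly) tuned policy ...
+//   tuner.end();         // report the result back to the tuning tool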
+
+namespace Impl {
+
+template <typename T>
+void fill_tile(std::vector<T>& cont, int tile_size) {
+  for (int x = 1; x < tile_size; x *= 2) {
+    cont.push_back(x);
+  }
+}
+template <typename T, typename Mapped>
+void fill_tile(std::map<T, Mapped>& cont, int tile_size) {
+  for (int x = 1; x < tile_size; x *= 2) {
+    fill_tile(cont[x], tile_size / x);
+  }
+}
+}  // namespace Impl
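+
+// For illustration: with tile_size = 8 and a rank-2 structure
+// (std::map<int, std::vector<int>>), fill_tile produces
+//   {1: {1, 2, 4}, 2: {1, 2}, 4: {1}}
+// i.e. every key/value product (a candidate tile) stays below tile_size.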
+
+template <int MDRangeRank>
+struct MDRangeTuner : public ExtendableTunerMixin<MDRangeTuner<MDRangeRank>> {
+ private:
+  static constexpr int rank       = MDRangeRank;
+  static constexpr int max_slices = 15;
+  using SpaceDescription =
+      typename Impl::n_dimensional_sparse_structure<int, rank>::type;
+  using TunerType =
+      decltype(make_multidimensional_sparse_tuning_problem<max_slices>(
+          std::declval<SpaceDescription>(),
+          std::declval<std::vector<std::string>>()));
+  TunerType tuner;
+
+ public:
+  MDRangeTuner() = default;
+  template <typename Functor, typename TagType, typename Calculator,
+            typename... Properties>
+  MDRangeTuner(const std::string& name,
+               const Kokkos::MDRangePolicy<Properties...>& policy,
+               const Functor& functor, const TagType& tag, Calculator calc) {
+    SpaceDescription desc;
+    int max_tile_size =
+        calc.get_mdrange_max_tile_size_product(policy, functor, tag);
+    Impl::fill_tile(desc, max_tile_size);
+    std::vector<std::string> feature_names;
+    for (int x = 0; x < rank; ++x) {
+      feature_names.push_back(name + "_tile_size_" + std::to_string(x));
+    }
+    tuner = make_multidimensional_sparse_tuning_problem<max_slices>(
+        desc, feature_names);
+  }
+  template <typename Policy, typename Tuple, size_t... Indices>
+  void set_policy_tile(Policy& policy, const Tuple& tuple,
+                       const std::index_sequence<Indices...>&) {
+    policy.impl_change_tile_size({std::get<Indices>(tuple)...});
+  }
+  template <typename... Properties>
+  void tune(Kokkos::MDRangePolicy<Properties...>& policy) {
+    if (Kokkos::Tools::Experimental::have_tuning_tool()) {
+      auto configuration = tuner.begin();
+      set_policy_tile(policy, configuration, std::make_index_sequence<rank>{});
+    }
+  }
+  void end() {
+    if (Kokkos::Tools::Experimental::have_tuning_tool()) {
+      tuner.end();
+    }
+  }
+
+  TunerType get_tuner() const { return tuner; }
+};
+
+template <class Choice>
+struct CategoricalTuner {
+  using choice_list = std::vector<Choice>;
+  choice_list choices;
+  size_t context;
+  size_t tuning_variable_id;
+  CategoricalTuner(std::string name, choice_list m_choices)
+      : choices(m_choices) {
+    std::vector<int64_t> indices;
+    for (typename decltype(choices)::size_type x = 0; x < choices.size(); ++x) {
+      indices.push_back(x);
+    }
+    VariableInfo info;
+    info.category      = StatisticalCategory::kokkos_value_categorical;
+    info.valueQuantity = CandidateValueType::kokkos_value_set;
+    info.type          = ValueType::kokkos_value_int64;
+    info.candidates    = make_candidate_set(indices.size(), indices.data());
+    tuning_variable_id = declare_output_type(name, info);
+  }
+  const Choice& begin() {
+    context = get_new_context_id();
+    begin_context(context);
+    VariableValue value = make_variable_value(tuning_variable_id, int64_t(0));
+    request_output_values(context, 1, &value);
+    return choices[value.value.int_value];
+  }
+  void end() { end_context(context); }
+};
+
+template <typename Choice>
+auto make_categorical_tuner(std::string name, std::vector<Choice> choices)
+    -> CategoricalTuner<Choice> {
+  return CategoricalTuner<Choice>(name, choices);
+}
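+
+// A rough usage sketch (the name and choices are assumptions for
+// illustration; the selection is driven by the loaded tuning tool):
+//
+//   std::vector<std::string> variants{"scan_based", "atomic_based"};
+//   auto tuner = make_categorical_tuner("sort_strategy", variants);
+//   const std::string& choice = tuner.begin();  // tool selects an index
+//   // ... run the selected variant ...
+//   tuner.end();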
+
+}  // namespace Experimental
+}  // namespace Tools
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_UniqueToken.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_UniqueToken.hpp
new file mode 100644 (file)
index 0000000..3c58423
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_UNIQUE_TOKEN_HPP
+#define KOKKOS_UNIQUE_TOKEN_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_MemoryTraits.hpp>
+#include <Kokkos_Core_fwd.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+enum class UniqueTokenScope : int { Instance, Global };
+
+/// \brief Class to generate unique ids based on the required amount of
+/// concurrency.
+///
+/// This object should behave like a ref-counted object, so that when the last
+/// instance is destroyed, resources are freed if needed.
+template <typename ExecutionSpace = Kokkos::DefaultExecutionSpace,
+          UniqueTokenScope        = UniqueTokenScope::Instance>
+class UniqueToken {
+ public:
+  using execution_space = ExecutionSpace;
+  using size_type       = typename execution_space::size_type;
+
+  /// \brief Create an object sized for the concurrency of the given instance.
+  ///
+  /// This object should not be shared between instances.
+  UniqueToken(execution_space const& = execution_space());
+
+  /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  size_type size() const;
+
+  /// \brief acquire value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  size_type acquire() const;
+
+  /// \brief release a previously acquired value
+  KOKKOS_INLINE_FUNCTION
+  void release(size_type) const;
+};
+
+/// \brief Instance scope UniqueToken allows for a max size other than
+/// execution_space::concurrency()
+///
+/// This object should behave like a ref-counted object, so that when the last
+/// instance is destroyed, resources are freed if needed.
+template <typename ExecutionSpace>
+class UniqueToken<ExecutionSpace, UniqueTokenScope::Instance>
+    : public UniqueToken<ExecutionSpace, UniqueTokenScope::Global> {
+ public:
+  using execution_space = ExecutionSpace;
+  using size_type       = typename execution_space::size_type;
+
+  /// \brief Create object with specified size
+  ///
+  /// It is required that max_size is >= the maximum number of concurrent
+  /// threads that will attempt to acquire the UniqueToken. This constructor is
+  /// most commonly useful when you:
+  ///   1) Have a loop bound that may be smaller than
+  ///   execution_space::concurrency().
+  ///   2) Want a per-team unique token in the range [0,
+  ///   execution_space::concurrency() / team_size)
+  UniqueToken(size_type max_size, execution_space const& = execution_space());
+};
+
+// NOTE There was an agreement amongst developers that "AcquireUniqueToken" is a
+// bad name but at this time no one has suggested a better alternative.
+
+/// \brief RAII helper for per-thread unique token values.
+///
+/// The token value will be acquired at construction and automatically
+/// released at destruction.
+template <typename ExecutionSpace,
+          UniqueTokenScope TokenScope = UniqueTokenScope::Instance>
+class AcquireUniqueToken {
+ public:
+  using exec_space = ExecutionSpace;
+  using size_type  = typename exec_space::size_type;
+  using token_type = UniqueToken<exec_space, TokenScope>;
+
+ private:
+  token_type my_token;
+  size_type my_acquired_val;
+
+ public:
+  KOKKOS_FUNCTION AcquireUniqueToken(token_type t)
+      : my_token(t), my_acquired_val(my_token.acquire()) {}
+
+  KOKKOS_FUNCTION ~AcquireUniqueToken() { my_token.release(my_acquired_val); }
+
+  KOKKOS_FUNCTION size_type value() const { return my_acquired_val; }
+};
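+
+// A minimal usage sketch (ExecSpace and n are assumptions for illustration):
+//
+//   Kokkos::Experimental::UniqueToken<ExecSpace> token;
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<ExecSpace>(0, n), KOKKOS_LAMBDA(int) {
+//         Kokkos::Experimental::AcquireUniqueToken<ExecSpace> lock(token);
+//         auto id = lock.value();  // unique among concurrently running threads
+//         // ... access a per-thread resource indexed by id ...
+//       });  // the token value is released when `lock` is destroyed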
+
+/// \brief RAII helper for per-team unique token values.
+///
+/// The token value will be acquired at construction and automatically
+/// released at destruction. All threads in a team will share the same
+/// token value.
+template <typename TeamPolicy>
+class AcquireTeamUniqueToken {
+ public:
+  using exec_space       = typename TeamPolicy::execution_space;
+  using token_type       = UniqueToken<exec_space>;
+  using size_type        = typename token_type::size_type;
+  using team_member_type = typename TeamPolicy::member_type;
+  using scratch_view =
+      Kokkos::View<size_type, typename exec_space::scratch_memory_space,
+                   Kokkos::MemoryUnmanaged>;
+
+ private:
+  token_type my_token;
+  size_type my_acquired_val;
+  scratch_view my_team_acquired_val;
+  team_member_type my_team;
+
+ public:
+  // NOTE The implementations of the constructor and destructor use
+  // `Kokkos::single()` which is an inline function defined in each backend.
+  // This creates circular dependency issues.  Moving them to a separate header
+  // is less than ideal and should be revisited later.  Having a `UniqueToken`
+  // forward declaration was considered but the non-type template parameter
+  // makes things complicated because it would require moving the definition of
+  // `UniqueTokenScope` enumeration type and its enumerators away which would
+  // hurt readability.
+  KOKKOS_FUNCTION AcquireTeamUniqueToken(token_type t, team_member_type team);
+  KOKKOS_FUNCTION ~AcquireTeamUniqueToken();
+  KOKKOS_FUNCTION size_type value() const { return my_acquired_val; }
+  static std::size_t shmem_size() { return scratch_view::shmem_size(); }
+};
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif  // KOKKOS_UNIQUE_TOKEN_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_Vectorization.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_Vectorization.hpp
new file mode 100644 (file)
index 0000000..4314ea4
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/// \file Kokkos_Vectorization.hpp
+/// \brief Declaration and definition of Kokkos::Vectorization interface.
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_VECTORIZATION_HPP
+#define KOKKOS_VECTORIZATION_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#include <Cuda/Kokkos_Cuda_Vectorization.hpp>
+#elif defined(KOKKOS_ENABLE_HIP)
+#include <HIP/Kokkos_HIP_Vectorization.hpp>
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_View.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_View.hpp
new file mode 100644 (file)
index 0000000..e92ed7d
--- /dev/null
@@ -0,0 +1,2001 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_VIEW_HPP
+#define KOKKOS_VIEW_HPP
+
+#include <type_traits>
+#include <string>
+#include <algorithm>
+#include <initializer_list>
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_HostSpace.hpp>
+#include <Kokkos_MemoryTraits.hpp>
+#include <Kokkos_ExecPolicy.hpp>
+#include <View/Hooks/Kokkos_ViewHooks.hpp>
+
+#include <impl/Kokkos_Tools.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class DataType>
+struct ViewArrayAnalysis;
+
+template <class DataType, class ArrayLayout,
+          typename ValueType =
+              typename ViewArrayAnalysis<DataType>::non_const_value_type>
+struct ViewDataAnalysis;
+
+template <class, class...>
+class ViewMapping {
+ public:
+  enum : bool { is_assignable_data_type = false };
+  enum : bool { is_assignable = false };
+};
+
+template <typename IntType>
+constexpr KOKKOS_INLINE_FUNCTION std::size_t count_valid_integers(
+    const IntType i0, const IntType i1, const IntType i2, const IntType i3,
+    const IntType i4, const IntType i5, const IntType i6, const IntType i7) {
+  static_assert(std::is_integral<IntType>::value,
+                "count_valid_integers() must have integer arguments.");
+
+  return (i0 != KOKKOS_INVALID_INDEX) + (i1 != KOKKOS_INVALID_INDEX) +
+         (i2 != KOKKOS_INVALID_INDEX) + (i3 != KOKKOS_INVALID_INDEX) +
+         (i4 != KOKKOS_INVALID_INDEX) + (i5 != KOKKOS_INVALID_INDEX) +
+         (i6 != KOKKOS_INVALID_INDEX) + (i7 != KOKKOS_INVALID_INDEX);
+}
+
+KOKKOS_INLINE_FUNCTION
+void runtime_check_rank(const size_t rank, const size_t dyn_rank,
+                        const bool is_void_spec, const size_t i0,
+                        const size_t i1, const size_t i2, const size_t i3,
+                        const size_t i4, const size_t i5, const size_t i6,
+                        const size_t i7, const std::string& label) {
+  (void)(label);
+
+  if (is_void_spec) {
+    const size_t num_passed_args =
+        count_valid_integers(i0, i1, i2, i3, i4, i5, i6, i7);
+
+    if (num_passed_args != dyn_rank && num_passed_args != rank) {
+      KOKKOS_IF_ON_HOST(
+          const std::string message =
+              "Constructor for Kokkos View '" + label +
+              "' has mismatched number of arguments. Number of arguments = " +
+              std::to_string(num_passed_args) +
+              " but dynamic rank = " + std::to_string(dyn_rank) + " \n";
+          Kokkos::abort(message.c_str());)
+      KOKKOS_IF_ON_DEVICE(Kokkos::abort("Constructor for Kokkos View has "
+                                        "mismatched number of arguments.");)
+    }
+  }
+}
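+
+// For example, Kokkos::View<double**> has rank 2 and dynamic rank 2, so a
+// construction like
+//   Kokkos::View<double**> v("v", 10);
+// passes only one valid extent; since 1 matches neither the dynamic rank nor
+// the total rank, this check aborts with a mismatched-arguments message.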
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+// Class to provide a uniform type
+namespace Kokkos {
+namespace Impl {
+template <class ViewType, int Traits = 0>
+struct ViewUniformType;
+}
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+/** \class ViewTraits
+ *  \brief Traits class for accessing attributes of a View.
+ *
+ * This is an implementation detail of View.  It is only of interest
+ * to developers implementing a new specialization of View.
+ *
+ * Template argument options:
+ *   - View< DataType >
+ *   - View< DataType , Space >
+ *   - View< DataType , Space , MemoryTraits >
+ *   - View< DataType , ArrayLayout >
+ *   - View< DataType , ArrayLayout , Space >
+ *   - View< DataType , ArrayLayout , MemoryTraits >
+ *   - View< DataType , ArrayLayout , Space , MemoryTraits >
+ *   - View< DataType , MemoryTraits >
+ */
+
+template <class DataType, class... Properties>
+struct ViewTraits;
+
+template <>
+struct ViewTraits<void> {
+  using execution_space = void;
+  using memory_space    = void;
+  using HostMirrorSpace = void;
+  using array_layout    = void;
+  using memory_traits   = void;
+  using specialize      = void;
+  using hooks_policy    = void;
+};
+
+template <class... Prop>
+struct ViewTraits<void, void, Prop...> {
+  // Ignore an extraneous 'void'
+  using execution_space = typename ViewTraits<void, Prop...>::execution_space;
+  using memory_space    = typename ViewTraits<void, Prop...>::memory_space;
+  using HostMirrorSpace = typename ViewTraits<void, Prop...>::HostMirrorSpace;
+  using array_layout    = typename ViewTraits<void, Prop...>::array_layout;
+  using memory_traits   = typename ViewTraits<void, Prop...>::memory_traits;
+  using specialize      = typename ViewTraits<void, Prop...>::specialize;
+  using hooks_policy    = typename ViewTraits<void, Prop...>::hooks_policy;
+};
+
+template <class HooksPolicy, class... Prop>
+struct ViewTraits<
+    std::enable_if_t<Kokkos::Experimental::is_hooks_policy<HooksPolicy>::value>,
+    HooksPolicy, Prop...> {
+  using execution_space = typename ViewTraits<void, Prop...>::execution_space;
+  using memory_space    = typename ViewTraits<void, Prop...>::memory_space;
+  using HostMirrorSpace = typename ViewTraits<void, Prop...>::HostMirrorSpace;
+  using array_layout    = typename ViewTraits<void, Prop...>::array_layout;
+  using memory_traits   = typename ViewTraits<void, Prop...>::memory_traits;
+  using specialize      = typename ViewTraits<void, Prop...>::specialize;
+  using hooks_policy    = HooksPolicy;
+};
+
+template <class ArrayLayout, class... Prop>
+struct ViewTraits<std::enable_if_t<Kokkos::is_array_layout<ArrayLayout>::value>,
+                  ArrayLayout, Prop...> {
+  // Specify layout, keep subsequent space and memory traits arguments
+
+  using execution_space = typename ViewTraits<void, Prop...>::execution_space;
+  using memory_space    = typename ViewTraits<void, Prop...>::memory_space;
+  using HostMirrorSpace = typename ViewTraits<void, Prop...>::HostMirrorSpace;
+  using array_layout    = ArrayLayout;
+  using memory_traits   = typename ViewTraits<void, Prop...>::memory_traits;
+  using specialize      = typename ViewTraits<void, Prop...>::specialize;
+  using hooks_policy    = typename ViewTraits<void, Prop...>::hooks_policy;
+};
+
+template <class Space, class... Prop>
+struct ViewTraits<std::enable_if_t<Kokkos::is_space<Space>::value>, Space,
+                  Prop...> {
+  // Specify Space, memory traits should be the only subsequent argument.
+
+  static_assert(
+      std::is_same<typename ViewTraits<void, Prop...>::execution_space,
+                   void>::value &&
+          std::is_same<typename ViewTraits<void, Prop...>::memory_space,
+                       void>::value &&
+          std::is_same<typename ViewTraits<void, Prop...>::HostMirrorSpace,
+                       void>::value &&
+          std::is_same<typename ViewTraits<void, Prop...>::array_layout,
+                       void>::value,
+      "Only one View Execution or Memory Space template argument");
+
+  using execution_space = typename Space::execution_space;
+  using memory_space    = typename Space::memory_space;
+  using HostMirrorSpace =
+      typename Kokkos::Impl::HostMirror<Space>::Space::memory_space;
+  using array_layout  = typename execution_space::array_layout;
+  using memory_traits = typename ViewTraits<void, Prop...>::memory_traits;
+  using specialize    = typename ViewTraits<void, Prop...>::specialize;
+  using hooks_policy  = typename ViewTraits<void, Prop...>::hooks_policy;
+};
+
+template <class MemoryTraits, class... Prop>
+struct ViewTraits<
+    std::enable_if_t<Kokkos::is_memory_traits<MemoryTraits>::value>,
+    MemoryTraits, Prop...> {
+  // Specify memory trait, should not be any subsequent arguments
+
+  static_assert(
+      std::is_same<typename ViewTraits<void, Prop...>::execution_space,
+                   void>::value &&
+          std::is_same<typename ViewTraits<void, Prop...>::memory_space,
+                       void>::value &&
+          std::is_same<typename ViewTraits<void, Prop...>::array_layout,
+                       void>::value &&
+          std::is_same<typename ViewTraits<void, Prop...>::memory_traits,
+                       void>::value &&
+          std::is_same<typename ViewTraits<void, Prop...>::hooks_policy,
+                       void>::value,
+      "MemoryTrait is the final optional template argument for a View");
+
+  using execution_space = void;
+  using memory_space    = void;
+  using HostMirrorSpace = void;
+  using array_layout    = void;
+  using memory_traits   = MemoryTraits;
+  using specialize      = void;
+  using hooks_policy    = void;
+};
+
+template <class DataType, class... Properties>
+struct ViewTraits {
+ private:
+  // Unpack the properties arguments
+  using prop = ViewTraits<void, Properties...>;
+
+  using ExecutionSpace =
+      std::conditional_t<!std::is_void<typename prop::execution_space>::value,
+                         typename prop::execution_space,
+                         Kokkos::DefaultExecutionSpace>;
+
+  using MemorySpace =
+      std::conditional_t<!std::is_void<typename prop::memory_space>::value,
+                         typename prop::memory_space,
+                         typename ExecutionSpace::memory_space>;
+
+  using ArrayLayout =
+      std::conditional_t<!std::is_void<typename prop::array_layout>::value,
+                         typename prop::array_layout,
+                         typename ExecutionSpace::array_layout>;
+
+  using HostMirrorSpace = std::conditional_t<
+      !std::is_void<typename prop::HostMirrorSpace>::value,
+      typename prop::HostMirrorSpace,
+      typename Kokkos::Impl::HostMirror<ExecutionSpace>::Space>;
+
+  using MemoryTraits =
+      std::conditional_t<!std::is_void<typename prop::memory_traits>::value,
+                         typename prop::memory_traits,
+                         typename Kokkos::MemoryManaged>;
+
+  using HooksPolicy =
+      std::conditional_t<!std::is_void<typename prop::hooks_policy>::value,
+                         typename prop::hooks_policy,
+                         Kokkos::Experimental::DefaultViewHooks>;
+
+  // Analyze data type's properties,
+  // May be specialized based upon the layout and value type
+  using data_analysis = Kokkos::Impl::ViewDataAnalysis<DataType, ArrayLayout>;
+
+ public:
+  //------------------------------------
+  // Data type traits:
+
+  using data_type           = typename data_analysis::type;
+  using const_data_type     = typename data_analysis::const_type;
+  using non_const_data_type = typename data_analysis::non_const_type;
+
+  //------------------------------------
+  // Compatible array of trivial type traits:
+
+  using scalar_array_type = typename data_analysis::scalar_array_type;
+  using const_scalar_array_type =
+      typename data_analysis::const_scalar_array_type;
+  using non_const_scalar_array_type =
+      typename data_analysis::non_const_scalar_array_type;
+
+  //------------------------------------
+  // Value type traits:
+
+  using value_type           = typename data_analysis::value_type;
+  using const_value_type     = typename data_analysis::const_value_type;
+  using non_const_value_type = typename data_analysis::non_const_value_type;
+
+  //------------------------------------
+  // Mapping traits:
+
+  using array_layout = ArrayLayout;
+  using dimension    = typename data_analysis::dimension;
+
+  using specialize = std::conditional_t<
+      std::is_void<typename data_analysis::specialize>::value,
+      typename prop::specialize,
+      typename data_analysis::specialize>; /* mapping specialization tag */
+
+  enum { rank = dimension::rank };
+  enum { rank_dynamic = dimension::rank_dynamic };
+
+  //------------------------------------
+  // Execution space, memory space, memory access traits, and host mirror space.
+
+  using execution_space   = ExecutionSpace;
+  using memory_space      = MemorySpace;
+  using device_type       = Kokkos::Device<ExecutionSpace, MemorySpace>;
+  using memory_traits     = MemoryTraits;
+  using host_mirror_space = HostMirrorSpace;
+  using hooks_policy      = HooksPolicy;
+
+  using size_type = typename MemorySpace::size_type;
+
+  enum { is_hostspace = std::is_same<MemorySpace, HostSpace>::value };
+  enum { is_managed = MemoryTraits::is_unmanaged == 0 };
+  enum { is_random_access = MemoryTraits::is_random_access == 1 };
+
+  //------------------------------------
+};
+
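+// A minimal illustration (editorial sketch, not part of upstream Kokkos) of
+// how ViewTraits resolves defaulted properties: explicitly given properties
+// are kept, and missing ones are filled in from the chosen spaces.
+//
+// \code
+//   using V = Kokkos::View<double**, Kokkos::LayoutRight, Kokkos::HostSpace>;
+//   static_assert(
+//       std::is_same<V::traits::array_layout, Kokkos::LayoutRight>::value,
+//       "explicitly requested layout is kept");
+//   static_assert(
+//       std::is_same<V::traits::memory_space, Kokkos::HostSpace>::value,
+//       "memory space taken from the given space argument");
+// \endcode
+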
+/** \class View
+ *  \brief View to an array of data.
+ *
+ * A View represents an array of one or more dimensions.
+ * For details, please refer to Kokkos' tutorial materials.
+ *
+ * \section Kokkos_View_TemplateParameters Template parameters
+ *
+ * This class has both required and optional template parameters.  The
+ * \c DataType parameter must always be provided, and must always be
+ * first.  The remaining parameters \c Properties... are optional and
+ * may be given in several combinations.  When explaining the template
+ * parameters, we refer to the valid categories of template parameters,
+ * in whatever order they may occur.
+ *
+ * Valid ways in which template arguments may be specified:
+ *   - View< DataType >
+ *   - View< DataType , Layout >
+ *   - View< DataType , Layout , Space >
+ *   - View< DataType , Layout , Space , MemoryTraits >
+ *   - View< DataType , Space >
+ *   - View< DataType , Space , MemoryTraits >
+ *   - View< DataType , MemoryTraits >
+ *
+ * \tparam DataType (required) This indicates both the type of each
+ *   entry of the array, and the combination of compile-time and
+ *   run-time array dimension(s).  For example, <tt>double*</tt>
+ *   indicates a one-dimensional array of \c double with run-time
+ *   dimension, and <tt>int*[3]</tt> a two-dimensional array of \c int
+ *   with run-time first dimension and compile-time second dimension
+ *   (of 3).  In general, the run-time dimensions (if any) must go
+ *   first, followed by zero or more compile-time dimensions.  For
+ *   more examples, please refer to the tutorial materials.
+ *
+ * \tparam Space (optional) The memory space.  If not specified, this
+ *   defaults to the memory space of the default execution space.
+ *
+ * \tparam Layout (optional) The array's layout in memory.  For
+ *   example, LayoutLeft indicates a column-major (Fortran style)
+ *   layout, and LayoutRight a row-major (C style) layout.  If not
+ *   specified, this defaults to the preferred layout for the
+ *   <tt>Space</tt>.
+ *
+ * \tparam MemoryTraits (optional) Assertion of the user's intended
+ *   access behavior.  For example, RandomAccess indicates read-only
+ *   access with limited spatial locality, and Unmanaged lets users
+ *   wrap externally allocated memory in a View without automatic
+ *   deallocation.
+ *
+ * \section Kokkos_View_MT MemoryTraits discussion
+ *
+ * \subsection Kokkos_View_MT_Interp MemoryTraits interpretation depends on
+ * Space
+ *
+ * Some \c MemoryTraits options may have different interpretations for
+ * different \c Space types.  For example, with the Cuda device,
+ * \c RandomAccess tells Kokkos to fetch the data through the texture
+ * cache, whereas the non-GPU devices have no such hardware construct.
+ *
+ * \subsection Kokkos_View_MT_PrefUse Preferred use of MemoryTraits
+ *
+ * Users should defer applying the optional \c MemoryTraits parameter
+ * until the point at which they actually plan to rely on it in a
+ * computational kernel.  This minimizes the number of template
+ * parameters exposed in their code, which reduces the cost of
+ * compilation.  Users may always assign a View without specified
+ * \c MemoryTraits to a compatible View with that specification.
+ * For example:
+ * \code
+ * // Pass in the simplest types of View possible.
+ * void
+ * doSomething (View<double*, Cuda> out,
+ *              View<const double*, Cuda> in)
+ * {
+ *   // Assign the "generic" View in to a RandomAccess View in_rr.
+ *   // Note that RandomAccess View objects must have const data.
+ *   View<const double*, Cuda, RandomAccess> in_rr = in;
+ *   // ... do something with in_rr and out ...
+ * }
+ * \endcode
+ */
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+template <class T1, class T2>
+struct is_always_assignable_impl;
+
+template <class... ViewTDst, class... ViewTSrc>
+struct is_always_assignable_impl<Kokkos::View<ViewTDst...>,
+                                 Kokkos::View<ViewTSrc...>> {
+  using mapping_type = Kokkos::Impl::ViewMapping<
+      typename Kokkos::View<ViewTDst...>::traits,
+      typename Kokkos::View<ViewTSrc...>::traits,
+      typename Kokkos::View<ViewTDst...>::traits::specialize>;
+
+  constexpr static bool value =
+      mapping_type::is_assignable &&
+      static_cast<int>(Kokkos::View<ViewTDst...>::rank_dynamic) >=
+          static_cast<int>(Kokkos::View<ViewTSrc...>::rank_dynamic);
+};
+
+template <class View1, class View2>
+using is_always_assignable = is_always_assignable_impl<
+    std::remove_reference_t<View1>,
+    std::remove_const_t<std::remove_reference_t<View2>>>;
+
+#ifdef KOKKOS_ENABLE_CXX17
+template <class T1, class T2>
+inline constexpr bool is_always_assignable_v =
+    is_always_assignable<T1, T2>::value;
+#endif
+
+template <class... ViewTDst, class... ViewTSrc>
+constexpr bool is_assignable(const Kokkos::View<ViewTDst...>& dst,
+                             const Kokkos::View<ViewTSrc...>& src) {
+  using DstTraits = typename Kokkos::View<ViewTDst...>::traits;
+  using SrcTraits = typename Kokkos::View<ViewTSrc...>::traits;
+  using mapping_type =
+      Kokkos::Impl::ViewMapping<DstTraits, SrcTraits,
+                                typename DstTraits::specialize>;
+
+#ifdef KOKKOS_ENABLE_CXX17
+  return is_always_assignable_v<Kokkos::View<ViewTDst...>,
+                                Kokkos::View<ViewTSrc...>> ||
+#else
+  return is_always_assignable<Kokkos::View<ViewTDst...>,
+                              Kokkos::View<ViewTSrc...>>::value ||
+#endif
+         (mapping_type::is_assignable &&
+          ((DstTraits::dimension::rank_dynamic >= 1) ||
+           (dst.static_extent(0) == src.extent(0))) &&
+          ((DstTraits::dimension::rank_dynamic >= 2) ||
+           (dst.static_extent(1) == src.extent(1))) &&
+          ((DstTraits::dimension::rank_dynamic >= 3) ||
+           (dst.static_extent(2) == src.extent(2))) &&
+          ((DstTraits::dimension::rank_dynamic >= 4) ||
+           (dst.static_extent(3) == src.extent(3))) &&
+          ((DstTraits::dimension::rank_dynamic >= 5) ||
+           (dst.static_extent(4) == src.extent(4))) &&
+          ((DstTraits::dimension::rank_dynamic >= 6) ||
+           (dst.static_extent(5) == src.extent(5))) &&
+          ((DstTraits::dimension::rank_dynamic >= 7) ||
+           (dst.static_extent(6) == src.extent(6))) &&
+          ((DstTraits::dimension::rank_dynamic >= 8) ||
+           (dst.static_extent(7) == src.extent(7))));
+}
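+
+// Illustrative sketch (not part of upstream Kokkos): a destination whose
+// extent is dynamic is always assignable from a compatible source whose
+// extent is static, since no run-time extent check can fail.
+//
+// \code
+//   using Dst = Kokkos::View<double*, Kokkos::HostSpace>;
+//   using Src = Kokkos::View<double[4], Kokkos::HostSpace>;
+//   static_assert(Kokkos::is_always_assignable<Dst, Src>::value,
+//                 "dynamic-extent destination accepts static-extent source");
+// \endcode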
+
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#include <impl/Kokkos_ViewMapping.hpp>
+#include <impl/Kokkos_ViewArray.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace {
+
+constexpr Kokkos::Impl::ALL_t ALL = Kokkos::Impl::ALL_t();
+
+constexpr Kokkos::Impl::WithoutInitializing_t WithoutInitializing =
+    Kokkos::Impl::WithoutInitializing_t();
+
+constexpr Kokkos::Impl::AllowPadding_t AllowPadding =
+    Kokkos::Impl::AllowPadding_t();
+
+}  // namespace
+
+/** \brief  Create View allocation parameter bundle from argument list.
+ *
+ *  Valid argument list members are:
+ *    1) label as a "string" or std::string
+ *    2) memory space instance of the View::memory_space type
+ *    3) execution space instance compatible with the View::memory_space
+ *    4) Kokkos::WithoutInitializing to bypass initialization
+ *    5) Kokkos::AllowPadding to allow the allocation to pad dimensions
+ *       for memory alignment
+ */
+template <class... Args>
+inline Impl::ViewCtorProp<typename Impl::ViewCtorProp<void, Args>::type...>
+view_alloc(Args const&... args) {
+  using return_type =
+      Impl::ViewCtorProp<typename Impl::ViewCtorProp<void, Args>::type...>;
+
+  static_assert(!return_type::has_pointer,
+                "Cannot give pointer-to-memory for view allocation");
+
+  return return_type(args...);
+}
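+
+// Illustrative usage (not part of upstream Kokkos; assumes Kokkos::initialize
+// has been called and "my_label" is an arbitrary label):
+//
+// \code
+//   Kokkos::View<double*> v(
+//       Kokkos::view_alloc("my_label", Kokkos::WithoutInitializing), 100);
+// \endcode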
+
+template <class... Args>
+KOKKOS_INLINE_FUNCTION
+    Impl::ViewCtorProp<typename Impl::ViewCtorProp<void, Args>::type...>
+    view_wrap(Args const&... args) {
+  using return_type =
+      Impl::ViewCtorProp<typename Impl::ViewCtorProp<void, Args>::type...>;
+
+  static_assert(!return_type::has_memory_space &&
+                    !return_type::has_execution_space &&
+                    !return_type::has_label && return_type::has_pointer,
+                "Must only give pointer-to-memory for view wrapping");
+
+  return return_type(args...);
+}
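+
+// Illustrative usage (not part of upstream Kokkos; 'buffer' is a hypothetical
+// caller-owned pointer to at least 100 doubles).  The resulting View is
+// unmanaged: it does not deallocate the wrapped memory.
+//
+// \code
+//   double* buffer = /* caller-owned storage */;
+//   Kokkos::View<double*, Kokkos::HostSpace> v(Kokkos::view_wrap(buffer), 100);
+// \endcode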
+
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+template <class DataType, class... Properties>
+class View;
+
+template <class>
+struct is_view : public std::false_type {};
+
+template <class D, class... P>
+struct is_view<View<D, P...>> : public std::true_type {};
+
+template <class D, class... P>
+struct is_view<const View<D, P...>> : public std::true_type {};
+
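+// Illustrative sketch (not part of upstream Kokkos):
+//
+// \code
+//   static_assert(Kokkos::is_view<Kokkos::View<int*>>::value, "");
+//   static_assert(!Kokkos::is_view<int*>::value, "");
+// \endcode
+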
+template <class DataType, class... Properties>
+class View : public ViewTraits<DataType, Properties...> {
+ private:
+  template <class, class...>
+  friend class View;
+  template <class, class...>
+  friend class Kokkos::Impl::ViewMapping;
+
+  using view_tracker_type = Kokkos::Impl::ViewTracker<View>;
+
+ public:
+  using traits = ViewTraits<DataType, Properties...>;
+
+ private:
+  using map_type =
+      Kokkos::Impl::ViewMapping<traits, typename traits::specialize>;
+  template <typename V>
+  friend struct Kokkos::Impl::ViewTracker;
+  using hooks_policy = typename traits::hooks_policy;
+
+  view_tracker_type m_track;
+  map_type m_map;
+
+ public:
+  //----------------------------------------
+  /** \brief  Compatible view of array of scalar types */
+  using array_type =
+      View<typename traits::scalar_array_type, typename traits::array_layout,
+           typename traits::device_type, typename traits::hooks_policy,
+           typename traits::memory_traits>;
+
+  /** \brief  Compatible view of const data type */
+  using const_type =
+      View<typename traits::const_data_type, typename traits::array_layout,
+           typename traits::device_type, typename traits::hooks_policy,
+           typename traits::memory_traits>;
+
+  /** \brief  Compatible view of non-const data type */
+  using non_const_type =
+      View<typename traits::non_const_data_type, typename traits::array_layout,
+           typename traits::device_type, typename traits::hooks_policy,
+           typename traits::memory_traits>;
+
+  /** \brief  Compatible HostMirror view */
+  using HostMirror =
+      View<typename traits::non_const_data_type, typename traits::array_layout,
+           Device<DefaultHostExecutionSpace,
+                  typename traits::host_mirror_space::memory_space>,
+           typename traits::hooks_policy>;
+
+  /** \brief  Compatible HostMirror view */
+  using host_mirror_type =
+      View<typename traits::non_const_data_type, typename traits::array_layout,
+           typename traits::host_mirror_space, typename traits::hooks_policy>;
+
+  /** \brief Unified types */
+  using uniform_type = typename Impl::ViewUniformType<View, 0>::type;
+  using uniform_const_type =
+      typename Impl::ViewUniformType<View, 0>::const_type;
+  using uniform_runtime_type =
+      typename Impl::ViewUniformType<View, 0>::runtime_type;
+  using uniform_runtime_const_type =
+      typename Impl::ViewUniformType<View, 0>::runtime_const_type;
+  using uniform_nomemspace_type =
+      typename Impl::ViewUniformType<View, 0>::nomemspace_type;
+  using uniform_const_nomemspace_type =
+      typename Impl::ViewUniformType<View, 0>::const_nomemspace_type;
+  using uniform_runtime_nomemspace_type =
+      typename Impl::ViewUniformType<View, 0>::runtime_nomemspace_type;
+  using uniform_runtime_const_nomemspace_type =
+      typename Impl::ViewUniformType<View, 0>::runtime_const_nomemspace_type;
+
+  //----------------------------------------
+  // Domain rank and extents
+
+  enum { Rank = map_type::Rank };
+
+  /** \brief rank() to be implemented
+   */
+  // KOKKOS_INLINE_FUNCTION
+  // static
+  // constexpr unsigned rank() { return map_type::Rank; }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
+      std::is_integral<iType>::value, size_t>
+  extent(const iType& r) const noexcept {
+    return m_map.extent(r);
+  }
+
+  static KOKKOS_INLINE_FUNCTION constexpr size_t static_extent(
+      const unsigned r) noexcept {
+    return map_type::static_extent(r);
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
+      std::is_integral<iType>::value, int>
+  extent_int(const iType& r) const noexcept {
+    return static_cast<int>(m_map.extent(r));
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr typename traits::array_layout layout()
+      const {
+    return m_map.layout();
+  }
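+
+  // Illustrative sketch (not part of upstream Kokkos): for
+  // Kokkos::View<double*[3]> a("a", 10), a.extent(0) == 10,
+  // a.extent(1) == 3, and static_extent(1) == 3 at compile time.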
+
+  //----------------------------------------
+  /*  Deprecate all 'dimension' functions in favor of
+   *  ISO/C++ vocabulary 'extent'.
+   */
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t size() const {
+    return m_map.dimension_0() * m_map.dimension_1() * m_map.dimension_2() *
+           m_map.dimension_3() * m_map.dimension_4() * m_map.dimension_5() *
+           m_map.dimension_6() * m_map.dimension_7();
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_0() const {
+    return m_map.stride_0();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_1() const {
+    return m_map.stride_1();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_2() const {
+    return m_map.stride_2();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_3() const {
+    return m_map.stride_3();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_4() const {
+    return m_map.stride_4();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_5() const {
+    return m_map.stride_5();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_6() const {
+    return m_map.stride_6();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_7() const {
+    return m_map.stride_7();
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
+      std::is_integral<iType>::value, size_t>
+  stride(iType r) const {
+    return (
+        r == 0
+            ? m_map.stride_0()
+            : (r == 1
+                   ? m_map.stride_1()
+                   : (r == 2
+                          ? m_map.stride_2()
+                          : (r == 3
+                                 ? m_map.stride_3()
+                                 : (r == 4
+                                        ? m_map.stride_4()
+                                        : (r == 5
+                                               ? m_map.stride_5()
+                                               : (r == 6
+                                                      ? m_map.stride_6()
+                                                      : m_map.stride_7())))))));
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+    m_map.stride(s);
+  }
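+
+  // Illustrative sketch (not part of upstream Kokkos): for a LayoutLeft
+  // rank-2 view m("m", 5, 4), the first index is stride-1, so
+  // m.stride_0() == 1 and m.stride_1() == 5 (absent allocation padding).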
+
+  //----------------------------------------
+  // Range span is the span that contains all members.
+
+  using reference_type = typename map_type::reference_type;
+  using pointer_type   = typename map_type::pointer_type;
+
+  enum {
+    reference_type_is_lvalue_reference =
+        std::is_lvalue_reference<reference_type>::value
+  };
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t span() const { return m_map.span(); }
+  KOKKOS_INLINE_FUNCTION bool span_is_contiguous() const {
+    return m_map.span_is_contiguous();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr bool is_allocated() const {
+    return m_map.data() != nullptr;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr pointer_type data() const {
+    return m_map.data();
+  }
+
+  //----------------------------------------
+  // Allow specializations to query their specialized map
+
+  KOKKOS_INLINE_FUNCTION
+  const Kokkos::Impl::ViewMapping<traits, typename traits::specialize>&
+  impl_map() const {
+    return m_map;
+  }
+  KOKKOS_INLINE_FUNCTION
+  const Kokkos::Impl::SharedAllocationTracker& impl_track() const {
+    return m_track.m_tracker;
+  }
+  //----------------------------------------
+
+ private:
+  static constexpr bool is_layout_left =
+      std::is_same<typename traits::array_layout, Kokkos::LayoutLeft>::value;
+
+  static constexpr bool is_layout_right =
+      std::is_same<typename traits::array_layout, Kokkos::LayoutRight>::value;
+
+  static constexpr bool is_layout_stride =
+      std::is_same<typename traits::array_layout, Kokkos::LayoutStride>::value;
+
+  static constexpr bool is_default_map =
+      std::is_void<typename traits::specialize>::value &&
+      (is_layout_left || is_layout_right || is_layout_stride);
+
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+
+#define KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(...)                               \
+  Kokkos::Impl::runtime_check_memory_access_violation<                      \
+      typename traits::memory_space>(                                       \
+      "Kokkos::View ERROR: attempt to access inaccessible memory space",    \
+      __VA_ARGS__);                                                         \
+  Kokkos::Impl::view_verify_operator_bounds<typename traits::memory_space>( \
+      __VA_ARGS__);
+
+#else
+
+#define KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(...)                            \
+  Kokkos::Impl::runtime_check_memory_access_violation<                   \
+      typename traits::memory_space>(                                    \
+      "Kokkos::View ERROR: attempt to access inaccessible memory space", \
+      __VA_ARGS__);
+
+#endif
+
+  template <typename... Is>
+  static KOKKOS_FUNCTION void check_access_member_function_valid_args(Is...) {
+    static_assert(Rank <= sizeof...(Is), "");
+    static_assert(sizeof...(Is) <= 8, "");
+    static_assert(Kokkos::Impl::are_integral<Is...>::value, "");
+  }
+
+  template <typename... Is>
+  static KOKKOS_FUNCTION void check_operator_parens_valid_args(Is...) {
+    static_assert(Rank == sizeof...(Is), "");
+    static_assert(Kokkos::Impl::are_integral<Is...>::value, "");
+  }
+
+ public:
+  //------------------------------
+  // Rank 1 default map operator()
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0>::value &&  //
+                        (1 == Rank) && is_default_map && !is_layout_stride),
+                       reference_type>
+      operator()(I0 i0) const {
+    check_operator_parens_valid_args(i0);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0)
+    return m_map.m_impl_handle[i0];
+  }
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0>::value &&  //
+                        (1 == Rank) && is_default_map && is_layout_stride),
+                       reference_type>
+      operator()(I0 i0) const {
+    check_operator_parens_valid_args(i0);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0)
+    return m_map.m_impl_handle[m_map.m_impl_offset.m_stride.S0 * i0];
+  }
+
+  //------------------------------
+  // Rank 1 operator[]
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      ((1 == Rank) && Kokkos::Impl::are_integral<I0>::value && !is_default_map),
+      reference_type>
+  operator[](I0 i0) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0)
+    return m_map.reference(i0);
+  }
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<((1 == Rank) && Kokkos::Impl::are_integral<I0>::value &&
+                        is_default_map && !is_layout_stride),
+                       reference_type>
+      operator[](I0 i0) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0)
+    return m_map.m_impl_handle[i0];
+  }
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<((1 == Rank) && Kokkos::Impl::are_integral<I0>::value &&
+                        is_default_map && is_layout_stride),
+                       reference_type>
+      operator[](I0 i0) const {
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0)
+    return m_map.m_impl_handle[m_map.m_impl_offset.m_stride.S0 * i0];
+  }
+
+  //------------------------------
+  // Rank 2 default map operator()
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, I1>::value &&  //
+                        (2 == Rank) && is_default_map && is_layout_left &&
+                        (traits::rank_dynamic == 0)),
+                       reference_type>
+      operator()(I0 i0, I1 i1) const {
+    check_operator_parens_valid_args(i0, i1);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1)
+    return m_map.m_impl_handle[i0 + m_map.m_impl_offset.m_dim.N0 * i1];
+  }
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, I1>::value &&  //
+                        (2 == Rank) && is_default_map && is_layout_left &&
+                        (traits::rank_dynamic != 0)),
+                       reference_type>
+      operator()(I0 i0, I1 i1) const {
+    check_operator_parens_valid_args(i0, i1);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1)
+    return m_map.m_impl_handle[i0 + m_map.m_impl_offset.m_stride * i1];
+  }
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, I1>::value &&  //
+                        (2 == Rank) && is_default_map && is_layout_right &&
+                        (traits::rank_dynamic == 0)),
+                       reference_type>
+      operator()(I0 i0, I1 i1) const {
+    check_operator_parens_valid_args(i0, i1);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1)
+    return m_map.m_impl_handle[i1 + m_map.m_impl_offset.m_dim.N1 * i0];
+  }
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, I1>::value &&  //
+                        (2 == Rank) && is_default_map && is_layout_right &&
+                        (traits::rank_dynamic != 0)),
+                       reference_type>
+      operator()(I0 i0, I1 i1) const {
+    check_operator_parens_valid_args(i0, i1);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1)
+    return m_map.m_impl_handle[i1 + m_map.m_impl_offset.m_stride * i0];
+  }
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, I1>::value &&  //
+                        (2 == Rank) && is_default_map && is_layout_stride),
+                       reference_type>
+      operator()(I0 i0, I1 i1) const {
+    check_operator_parens_valid_args(i0, i1);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1)
+    return m_map.m_impl_handle[i0 * m_map.m_impl_offset.m_stride.S0 +
+                               i1 * m_map.m_impl_offset.m_stride.S1];
+  }
+
+  // Rank 0 -> 8 operator(), except for rank-1 and rank-2 with default map,
+  // which have "inlined" versions above.
+
+  template <typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<Is...>::value &&  //
+       (2 != Rank) && (1 != Rank) && (0 != Rank) && is_default_map),
+      reference_type>
+  operator()(Is... indices) const {
+    check_operator_parens_valid_args(indices...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, indices...)
+    return m_map.m_impl_handle[m_map.m_impl_offset(indices...)];
+  }
+
+  template <typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<Is...>::value &&  //
+                        ((0 == Rank) || !is_default_map)),
+                       reference_type>
+      operator()(Is... indices) const {
+    check_operator_parens_valid_args(indices...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, indices...)
+    return m_map.reference(indices...);
+  }
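+
+  // Illustrative sketch (not part of upstream Kokkos): elements are read and
+  // written through operator(), e.g. for a rank-2 view m, m(i, j) = 1.0;
+  // rank-1 views additionally support m[i].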
+
+  //------------------------------
+  // Rank 0
+
+  template <typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<Is...>::value && (0 == Rank)), reference_type>
+  access(Is... extra) const {
+    check_access_member_function_valid_args(extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, extra...)
+    return m_map.reference();
+  }
+
+  //------------------------------
+  // Rank 1
+
+  template <typename I0, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, Is...>::value &&
+                        (1 == Rank) && !is_default_map),
+                       reference_type>
+      access(I0 i0, Is... extra) const {
+    check_access_member_function_valid_args(i0, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, extra...)
+    return m_map.reference(i0);
+  }
+
+  template <typename I0, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, Is...>::value &&
+                        (1 == Rank) && is_default_map && !is_layout_stride),
+                       reference_type>
+      access(I0 i0, Is... extra) const {
+    check_access_member_function_valid_args(i0, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, extra...)
+    return m_map.m_impl_handle[i0];
+  }
+
+  template <typename I0, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, Is...>::value &&
+                        (1 == Rank) && is_default_map && is_layout_stride),
+                       reference_type>
+      access(I0 i0, Is... extra) const {
+    check_access_member_function_valid_args(i0, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, extra...)
+    return m_map.m_impl_handle[m_map.m_impl_offset.m_stride.S0 * i0];
+  }
+
+  //------------------------------
+  // Rank 2
+
+  template <typename I0, typename I1, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, I1, Is...>::value &&
+                        (2 == Rank) && !is_default_map),
+                       reference_type>
+      access(I0 i0, I1 i1, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, extra...)
+    return m_map.reference(i0, i1);
+  }
+
+  template <typename I0, typename I1, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<I0, I1, Is...>::value && (2 == Rank) &&
+       is_default_map && is_layout_left && (traits::rank_dynamic == 0)),
+      reference_type>
+  access(I0 i0, I1 i1, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, extra...)
+    return m_map.m_impl_handle[i0 + m_map.m_impl_offset.m_dim.N0 * i1];
+  }
+
+  template <typename I0, typename I1, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<I0, I1, Is...>::value && (2 == Rank) &&
+       is_default_map && is_layout_left && (traits::rank_dynamic != 0)),
+      reference_type>
+  access(I0 i0, I1 i1, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, extra...)
+    return m_map.m_impl_handle[i0 + m_map.m_impl_offset.m_stride * i1];
+  }
+
+  template <typename I0, typename I1, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<I0, I1, Is...>::value && (2 == Rank) &&
+       is_default_map && is_layout_right && (traits::rank_dynamic == 0)),
+      reference_type>
+  access(I0 i0, I1 i1, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, extra...)
+    return m_map.m_impl_handle[i1 + m_map.m_impl_offset.m_dim.N1 * i0];
+  }
+
+  template <typename I0, typename I1, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<I0, I1, Is...>::value && (2 == Rank) &&
+       is_default_map && is_layout_right && (traits::rank_dynamic != 0)),
+      reference_type>
+  access(I0 i0, I1 i1, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, extra...)
+    return m_map.m_impl_handle[i1 + m_map.m_impl_offset.m_stride * i0];
+  }
+
+  template <typename I0, typename I1, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, I1, Is...>::value &&
+                        (2 == Rank) && is_default_map && is_layout_stride),
+                       reference_type>
+      access(I0 i0, I1 i1, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, extra...)
+    return m_map.m_impl_handle[i0 * m_map.m_impl_offset.m_stride.S0 +
+                               i1 * m_map.m_impl_offset.m_stride.S1];
+  }
+
+  //------------------------------
+  // Rank 3
+
+  template <typename I0, typename I1, typename I2, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, I1, I2, Is...>::value &&
+                        (3 == Rank) && is_default_map),
+                       reference_type>
+      access(I0 i0, I1 i1, I2 i2, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, i2, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, i2, extra...)
+    return m_map.m_impl_handle[m_map.m_impl_offset(i0, i1, i2)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, I1, I2, Is...>::value &&
+                        (3 == Rank) && !is_default_map),
+                       reference_type>
+      access(I0 i0, I1 i1, I2 i2, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, i2, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, i2, extra...)
+    return m_map.reference(i0, i1, i2);
+  }
+
+  //------------------------------
+  // Rank 4
+
+  template <typename I0, typename I1, typename I2, typename I3, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<I0, I1, I2, I3, Is...>::value && (4 == Rank) &&
+       is_default_map),
+      reference_type>
+  access(I0 i0, I1 i1, I2 i2, I3 i3, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, i2, i3, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, i2, i3, extra...)
+    return m_map.m_impl_handle[m_map.m_impl_offset(i0, i1, i2, i3)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<I0, I1, I2, I3, Is...>::value && (4 == Rank) &&
+       !is_default_map),
+      reference_type>
+  access(I0 i0, I1 i1, I2 i2, I3 i3, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, i2, i3, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, i2, i3, extra...)
+    return m_map.reference(i0, i1, i2, i3);
+  }
+
+  //------------------------------
+  // Rank 5
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<I0, I1, I2, I3, I4, Is...>::value &&
+       (5 == Rank) && is_default_map),
+      reference_type>
+  access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, i2, i3, i4, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, i2, i3, i4,
+                                     extra...)
+    return m_map.m_impl_handle[m_map.m_impl_offset(i0, i1, i2, i3, i4)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<I0, I1, I2, I3, I4, Is...>::value &&
+       (5 == Rank) && !is_default_map),
+      reference_type>
+  access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, i2, i3, i4, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, i2, i3, i4,
+                                     extra...)
+    return m_map.reference(i0, i1, i2, i3, i4);
+  }
+
+  //------------------------------
+  // Rank 6
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<I0, I1, I2, I3, I4, I5, Is...>::value &&
+       (6 == Rank) && is_default_map),
+      reference_type>
+  access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, I5 i5, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, i2, i3, i4, i5, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, i2, i3, i4, i5,
+                                     extra...)
+    return m_map.m_impl_handle[m_map.m_impl_offset(i0, i1, i2, i3, i4, i5)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<I0, I1, I2, I3, I4, I5, Is...>::value &&
+       (6 == Rank) && !is_default_map),
+      reference_type>
+  access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, I5 i5, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, i2, i3, i4, i5, extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, i2, i3, i4, i5,
+                                     extra...)
+    return m_map.reference(i0, i1, i2, i3, i4, i5);
+  }
+
+  //------------------------------
+  // Rank 7
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<I0, I1, I2, I3, I4, I5, I6, Is...>::value &&
+       (7 == Rank) && is_default_map),
+      reference_type>
+  access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, I5 i5, I6 i6, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, i2, i3, i4, i5, i6,
+                                            extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, i2, i3, i4, i5, i6,
+                                     extra...)
+    return m_map.m_impl_handle[m_map.m_impl_offset(i0, i1, i2, i3, i4, i5, i6)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      (Kokkos::Impl::always_true<I0, I1, I2, I3, I4, I5, I6, Is...>::value &&
+       (7 == Rank) && !is_default_map),
+      reference_type>
+  access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, I5 i5, I6 i6, Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, i2, i3, i4, i5, i6,
+                                            extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, i2, i3, i4, i5, i6,
+                                     extra...)
+    return m_map.reference(i0, i1, i2, i3, i4, i5, i6);
+  }
+
+  //------------------------------
+  // Rank 8
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename I7, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, I1, I2, I3, I4, I5, I6,
+                                                  I7, Is...>::value &&
+                        (8 == Rank) && is_default_map),
+                       reference_type>
+      access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, I5 i5, I6 i6, I7 i7,
+             Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, i2, i3, i4, i5, i6, i7,
+                                            extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, i2, i3, i4, i5, i6,
+                                     i7, extra...)
+    return m_map
+        .m_impl_handle[m_map.m_impl_offset(i0, i1, i2, i3, i4, i5, i6, i7)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename I7, typename... Is>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(Kokkos::Impl::always_true<I0, I1, I2, I3, I4, I5, I6,
+                                                  I7, Is...>::value &&
+                        (8 == Rank) && !is_default_map),
+                       reference_type>
+      access(I0 i0, I1 i1, I2 i2, I3 i3, I4 i4, I5 i5, I6 i6, I7 i7,
+             Is... extra) const {
+    check_access_member_function_valid_args(i0, i1, i2, i3, i4, i5, i6, i7,
+                                            extra...);
+    KOKKOS_IMPL_VIEW_OPERATOR_VERIFY(m_track, m_map, i0, i1, i2, i3, i4, i5, i6,
+                                     i7, extra...)
+    return m_map.reference(i0, i1, i2, i3, i4, i5, i6, i7);
+  }
+
+#undef KOKKOS_IMPL_VIEW_OPERATOR_VERIFY
+
+  //----------------------------------------
+  // Standard destructor, constructors, and assignment operators
+
+  KOKKOS_DEFAULTED_FUNCTION
+  ~View() = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  View() = default;
+
+  KOKKOS_FUNCTION
+  View(const View& other) : m_track(other.m_track), m_map(other.m_map) {
+    KOKKOS_IF_ON_HOST((hooks_policy::copy_construct(*this, other);))
+  }
+
+  KOKKOS_FUNCTION
+  View(View&& other)
+      : m_track{std::move(other.m_track)}, m_map{std::move(other.m_map)} {
+    KOKKOS_IF_ON_HOST((hooks_policy::move_construct(*this, other);))
+  }
+
+  KOKKOS_FUNCTION
+  View& operator=(const View& other) {
+    m_map   = other.m_map;
+    m_track = other.m_track;
+
+    KOKKOS_IF_ON_HOST((hooks_policy::copy_assign(*this, other);))
+
+    return *this;
+  }
+
+  KOKKOS_FUNCTION
+  View& operator=(View&& other) {
+    m_map   = std::move(other.m_map);
+    m_track = std::move(other.m_track);
+
+    KOKKOS_IF_ON_HOST((hooks_policy::move_assign(*this, other);))
+
+    return *this;
+  }
+
+  //----------------------------------------
+  // Compatible view copy constructor and assignment
+  // may assign unmanaged from managed.
+
+  template <class RT, class... RP>
+  KOKKOS_INLINE_FUNCTION View(
+      const View<RT, RP...>& rhs,
+      std::enable_if_t<Kokkos::Impl::ViewMapping<
+          traits, typename View<RT, RP...>::traits,
+          typename traits::specialize>::is_assignable_data_type>* = nullptr)
+      : m_track(rhs), m_map() {
+    using SrcTraits = typename View<RT, RP...>::traits;
+    using Mapping   = Kokkos::Impl::ViewMapping<traits, SrcTraits,
+                                              typename traits::specialize>;
+    static_assert(Mapping::is_assignable,
+                  "Incompatible View copy construction");
+    Mapping::assign(m_map, rhs.m_map, rhs.m_track.m_tracker);
+  }
+
+  template <class RT, class... RP>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<
+      Kokkos::Impl::ViewMapping<
+          traits, typename View<RT, RP...>::traits,
+          typename traits::specialize>::is_assignable_data_type,
+      View>&
+  operator=(const View<RT, RP...>& rhs) {
+    using SrcTraits = typename View<RT, RP...>::traits;
+    using Mapping   = Kokkos::Impl::ViewMapping<traits, SrcTraits,
+                                              typename traits::specialize>;
+    static_assert(Mapping::is_assignable, "Incompatible View copy assignment");
+    Mapping::assign(m_map, rhs.m_map, rhs.m_track.m_tracker);
+    m_track.assign(rhs);
+    return *this;
+  }
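+
+  // Illustrative sketch (not part of upstream Kokkos): a view of const data
+  // may be copy-constructed or assigned from its non-const counterpart; both
+  // views then share the same allocation.
+  //
+  // \code
+  //   Kokkos::View<double*> a("a", 10);
+  //   Kokkos::View<const double*> ac = a;
+  // \endcode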
+
+  //----------------------------------------
+  // Compatible subview constructor
+  // may assign unmanaged from managed.
+
+  template <class RT, class... RP, class Arg0, class... Args>
+  KOKKOS_INLINE_FUNCTION View(const View<RT, RP...>& src_view, const Arg0 arg0,
+                              Args... args)
+      : m_track(src_view), m_map() {
+    using SrcType = View<RT, RP...>;
+
+    using Mapping = Kokkos::Impl::ViewMapping<void, typename SrcType::traits,
+                                              Arg0, Args...>;
+
+    using DstType = typename Mapping::type;
+
+    static_assert(
+        Kokkos::Impl::ViewMapping<traits, typename DstType::traits,
+                                  typename traits::specialize>::is_assignable,
+        "Subview construction requires compatible view and subview arguments");
+
+    Mapping::assign(m_map, src_view.m_map, arg0, args...);
+  }
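+
+  // Illustrative sketch (not part of upstream Kokkos): this constructor is
+  // what Kokkos::subview builds on, e.g.
+  //
+  // \code
+  //   Kokkos::View<double**> m("m", 10, 10);
+  //   auto row = Kokkos::subview(m, 5, Kokkos::ALL);  // rank-1 view of row 5
+  // \endcode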
+
+  //----------------------------------------
+  // Allocation tracking properties
+
+  KOKKOS_INLINE_FUNCTION
+  int use_count() const { return m_track.m_tracker.use_count(); }
+
+  inline const std::string label() const {
+    return m_track.m_tracker
+        .template get_label<typename traits::memory_space>();
+  }
+
+ private:
+  enum class check_input_args : bool { yes = true, no = false };
+
+ public:
+  //----------------------------------------
+  // Allocation according to allocation properties and array layout
+
+  template <class... P>
+  explicit inline View(
+      const Impl::ViewCtorProp<P...>& arg_prop,
+      std::enable_if_t<!Impl::ViewCtorProp<P...>::has_pointer,
+                       typename traits::array_layout> const& arg_layout,
+      check_input_args check_args = check_input_args::no)
+      : m_track(), m_map() {
+    // Append layout and spaces if they were not given as input.
+    using alloc_prop_input = Impl::ViewCtorProp<P...>;
+
+    // Use 'std::integral_constant<unsigned, I>' placeholders for properties
+    // that were already given, to avoid duplicate base class errors.
+    using alloc_prop = Impl::ViewCtorProp<
+        P...,
+        std::conditional_t<alloc_prop_input::has_label,
+                           std::integral_constant<unsigned int, 0>,
+                           std::string>,
+        std::conditional_t<alloc_prop_input::has_memory_space,
+                           std::integral_constant<unsigned int, 1>,
+                           typename traits::device_type::memory_space>,
+        std::conditional_t<alloc_prop_input::has_execution_space,
+                           std::integral_constant<unsigned int, 2>,
+                           typename traits::device_type::execution_space>>;
+
+    static_assert(traits::is_managed,
+                  "View allocation constructor requires managed memory");
+
+    if (alloc_prop::initialize &&
+        !alloc_prop::execution_space::impl_is_initialized()) {
+      // If initializing view data then
+      // the execution space must be initialized.
+      Kokkos::Impl::throw_runtime_exception(
+          "Constructing View and initializing data with uninitialized "
+          "execution space");
+    }
+
+    // Copy the input allocation properties with possibly defaulted properties
+    alloc_prop prop_copy(arg_prop);
+
+    if (check_args == check_input_args::yes) {
+      size_t i0 = arg_layout.dimension[0];
+      size_t i1 = arg_layout.dimension[1];
+      size_t i2 = arg_layout.dimension[2];
+      size_t i3 = arg_layout.dimension[3];
+      size_t i4 = arg_layout.dimension[4];
+      size_t i5 = arg_layout.dimension[5];
+      size_t i6 = arg_layout.dimension[6];
+      size_t i7 = arg_layout.dimension[7];
+
+      const std::string& alloc_name =
+          static_cast<Kokkos::Impl::ViewCtorProp<void, std::string> const&>(
+              prop_copy)
+              .value;
+      Impl::runtime_check_rank(
+          traits::rank, traits::rank_dynamic,
+          std::is_same<typename traits::specialize, void>::value, i0, i1, i2,
+          i3, i4, i5, i6, i7, alloc_name);
+    }
+
+//------------------------------------------------------------
+#if defined(KOKKOS_ENABLE_CUDA)
+    // If allocating in CudaUVMSpace must fence before and after
+    // the allocation to protect against possible concurrent access
+    // on the CPU and the GPU.
+    // Fence using the trait's execution space (which will be Kokkos::Cuda)
+    // to avoid incomplete type errors from using Kokkos::Cuda directly.
+    if (std::is_same<Kokkos::CudaUVMSpace,
+                     typename traits::device_type::memory_space>::value) {
+      typename traits::device_type::memory_space::execution_space().fence(
+          "Kokkos::View<...>::View: fence before allocating UVM");
+    }
+#endif
+    //------------------------------------------------------------
+
+    Kokkos::Impl::SharedAllocationRecord<>* record = m_map.allocate_shared(
+        prop_copy, arg_layout, Impl::ViewCtorProp<P...>::has_execution_space);
+
+//------------------------------------------------------------
+#if defined(KOKKOS_ENABLE_CUDA)
+    if (std::is_same<Kokkos::CudaUVMSpace,
+                     typename traits::device_type::memory_space>::value) {
+      typename traits::device_type::memory_space::execution_space().fence(
+          "Kokkos::View<...>::View: fence after allocating UVM");
+    }
+#endif
+    //------------------------------------------------------------
+
+    // Setup and initialization complete, start tracking
+    m_track.m_tracker.assign_allocated_record_to_uninitialized(record);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void assign_data(pointer_type arg_data) {
+    m_track.m_tracker.clear();
+    m_map.assign_data(arg_data);
+  }
+
+  // Wrap memory according to properties and array layout
+  template <class... P>
+  explicit KOKKOS_INLINE_FUNCTION View(
+      const Impl::ViewCtorProp<P...>& arg_prop,
+      std::enable_if_t<Impl::ViewCtorProp<P...>::has_pointer,
+                       typename traits::array_layout> const& arg_layout,
+      check_input_args /*ignored*/ = check_input_args::no)  // Not checking
+      : m_track(),  // No memory tracking
+        m_map(arg_prop, arg_layout) {
+    static_assert(
+        std::is_same<pointer_type,
+                     typename Impl::ViewCtorProp<P...>::pointer_type>::value,
+        "Constructing View to wrap user memory must supply matching pointer "
+        "type");
+  }
+
+  // Simple dimension-only layout
+  template <class... P>
+  explicit inline View(
+      const Impl::ViewCtorProp<P...>& arg_prop,
+      std::enable_if_t<!Impl::ViewCtorProp<P...>::has_pointer, size_t> const
+          arg_N0          = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
+      : View(arg_prop,
+             typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
+                                           arg_N4, arg_N5, arg_N6, arg_N7),
+             check_input_args::yes) {
+    static_assert(traits::array_layout::is_extent_constructible,
+                  "Layout is not constructible from extent arguments. Use "
+                  "overload taking a layout object instead.");
+  }
+
+  template <class... P>
+  explicit KOKKOS_INLINE_FUNCTION View(
+      const Impl::ViewCtorProp<P...>& arg_prop,
+      std::enable_if_t<Impl::ViewCtorProp<P...>::has_pointer, size_t> const
+          arg_N0          = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
+      : View(arg_prop,
+             typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
+                                           arg_N4, arg_N5, arg_N6, arg_N7),
+             check_input_args::yes) {
+    static_assert(traits::array_layout::is_extent_constructible,
+                  "Layout is not constructible from extent arguments. Use "
+                  "overload taking a layout object instead.");
+  }
+
+  // Allocate with label and layout
+  template <typename Label>
+  explicit inline View(
+      const Label& arg_label,
+      std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value,
+                       typename traits::array_layout> const& arg_layout)
+      : View(Impl::ViewCtorProp<std::string>(arg_label), arg_layout,
+             check_input_args::yes) {}
+
+  // Allocate with label and extents; must be disambiguated from the
+  // subview constructor.
+  template <typename Label>
+  explicit inline View(
+      const Label& arg_label,
+      std::enable_if_t<Kokkos::Impl::is_view_label<Label>::value, const size_t>
+          arg_N0          = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
+      : View(Impl::ViewCtorProp<std::string>(arg_label),
+             typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
+                                           arg_N4, arg_N5, arg_N6, arg_N7),
+             check_input_args::yes) {
+    static_assert(traits::array_layout::is_extent_constructible,
+                  "Layout is not constructible from extent arguments. Use "
+                  "overload taking a layout object instead.");
+  }
+
+  // Construct view from ViewTracker and map
+  // This should be the preferred method because future extensions may need to
+  // use the ViewTracker class.
+  template <class Traits>
+  KOKKOS_INLINE_FUNCTION View(
+      const view_tracker_type& track,
+      const Kokkos::Impl::ViewMapping<Traits, typename Traits::specialize>& map)
+      : m_track(track), m_map() {
+    using Mapping =
+        Kokkos::Impl::ViewMapping<traits, Traits, typename traits::specialize>;
+    static_assert(Mapping::is_assignable,
+                  "Incompatible View copy construction");
+    Mapping::assign(m_map, map, track.m_tracker);
+  }
+
+  // Construct View from internal shared allocation tracker object and map
+  // This is here for backwards compatibility for classes that derive from
+  // Kokkos::View
+  template <class Traits>
+  KOKKOS_INLINE_FUNCTION View(
+      const typename view_tracker_type::track_type& track,
+      const Kokkos::Impl::ViewMapping<Traits, typename Traits::specialize>& map)
+      : m_track(track), m_map() {
+    using Mapping =
+        Kokkos::Impl::ViewMapping<traits, Traits, typename traits::specialize>;
+    static_assert(Mapping::is_assignable,
+                  "Incompatible View copy construction");
+    Mapping::assign(m_map, map, track);
+  }
+
+  //----------------------------------------
+  // Memory span required to wrap these dimensions.
+  static constexpr size_t required_allocation_size(
+      typename traits::array_layout const& layout) {
+    return map_type::memory_span(layout);
+  }
+
+  static constexpr size_t required_allocation_size(
+      const size_t arg_N0 = 0, const size_t arg_N1 = 0, const size_t arg_N2 = 0,
+      const size_t arg_N3 = 0, const size_t arg_N4 = 0, const size_t arg_N5 = 0,
+      const size_t arg_N6 = 0, const size_t arg_N7 = 0) {
+    static_assert(traits::array_layout::is_extent_constructible,
+                  "Layout is not constructible from extent arguments. Use "
+                  "overload taking a layout object instead.");
+    return map_type::memory_span(typename traits::array_layout(
+        arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6, arg_N7));
+  }
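+
+  // Illustrative sketch (not part of upstream Kokkos): the number of bytes
+  // needed to wrap a 100-element rank-1 view of double, at least
+  // 100 * sizeof(double):
+  //
+  // \code
+  //   size_t bytes = Kokkos::View<double*>::required_allocation_size(100);
+  // \endcode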
+
+  explicit KOKKOS_INLINE_FUNCTION View(
+      pointer_type arg_ptr, const size_t arg_N0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
+      : View(Impl::ViewCtorProp<pointer_type>(arg_ptr),
+             typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
+                                           arg_N4, arg_N5, arg_N6, arg_N7),
+             check_input_args::yes) {
+    static_assert(traits::array_layout::is_extent_constructible,
+                  "Layout is not constructible from extent arguments. Use "
+                  "overload taking a layout object instead.");
+  }
+
+  explicit KOKKOS_INLINE_FUNCTION View(
+      pointer_type arg_ptr, const typename traits::array_layout& arg_layout)
+      : View(Impl::ViewCtorProp<pointer_type>(arg_ptr), arg_layout) {}
+
+  //----------------------------------------
+  // Shared scratch memory constructor
+
+  static KOKKOS_INLINE_FUNCTION size_t
+  shmem_size(const size_t arg_N0 = KOKKOS_INVALID_INDEX,
+             const size_t arg_N1 = KOKKOS_INVALID_INDEX,
+             const size_t arg_N2 = KOKKOS_INVALID_INDEX,
+             const size_t arg_N3 = KOKKOS_INVALID_INDEX,
+             const size_t arg_N4 = KOKKOS_INVALID_INDEX,
+             const size_t arg_N5 = KOKKOS_INVALID_INDEX,
+             const size_t arg_N6 = KOKKOS_INVALID_INDEX,
+             const size_t arg_N7 = KOKKOS_INVALID_INDEX) {
+    static_assert(traits::array_layout::is_extent_constructible,
+                  "Layout is not constructible from extent arguments. Use "
+                  "overload taking a layout object instead.");
+    const size_t num_passed_args = Impl::count_valid_integers(
+        arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6, arg_N7);
+
+    if (std::is_void<typename traits::specialize>::value &&
+        num_passed_args != traits::rank_dynamic) {
+      Kokkos::abort(
+          "Kokkos::View::shmem_size() rank_dynamic != number of arguments.\n");
+    }
+
+    return View::shmem_size(typename traits::array_layout(
+        arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6, arg_N7));
+  }
+
+  static KOKKOS_INLINE_FUNCTION size_t
+  shmem_size(typename traits::array_layout const& arg_layout) {
+    return map_type::memory_span(arg_layout) +
+           sizeof(typename traits::value_type);
+  }
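A hedged sketch of the intended pattern (team count and extent are illustrative): query shmem_size on the host, request that many bytes per team, then build the View with the scratch-memory constructor that follows:

    using member_type  = Kokkos::TeamPolicy<>::member_type;
    using scratch_view =
        Kokkos::View<double*, Kokkos::DefaultExecutionSpace::scratch_memory_space,
                     Kokkos::MemoryUnmanaged>;
    const size_t bytes = scratch_view::shmem_size(128);  // per-team scratch request
    Kokkos::parallel_for(
        Kokkos::TeamPolicy<>(64, Kokkos::AUTO)
            .set_scratch_size(0, Kokkos::PerTeam(bytes)),
        KOKKOS_LAMBDA(const member_type& team) {
          scratch_view tmp(team.team_scratch(0), 128);  // scratch ctor below
          tmp(team.team_rank() % 128) = 1.0;            // illustrative use
        });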
+
+  explicit KOKKOS_INLINE_FUNCTION View(
+      const typename traits::execution_space::scratch_memory_space& arg_space,
+      const typename traits::array_layout& arg_layout)
+      : View(Impl::ViewCtorProp<pointer_type>(
+                 reinterpret_cast<pointer_type>(arg_space.get_shmem_aligned(
+                     map_type::memory_span(arg_layout),
+                     sizeof(typename traits::value_type)))),
+             arg_layout) {}
+
+  explicit KOKKOS_INLINE_FUNCTION View(
+      const typename traits::execution_space::scratch_memory_space& arg_space,
+      const size_t arg_N0 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N1 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N2 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N3 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N4 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N5 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N6 = KOKKOS_IMPL_CTOR_DEFAULT_ARG,
+      const size_t arg_N7 = KOKKOS_IMPL_CTOR_DEFAULT_ARG)
+      : View(Impl::ViewCtorProp<pointer_type>(
+                 reinterpret_cast<pointer_type>(arg_space.get_shmem_aligned(
+                     map_type::memory_span(typename traits::array_layout(
+                         arg_N0, arg_N1, arg_N2, arg_N3, arg_N4, arg_N5, arg_N6,
+                         arg_N7)),
+                     sizeof(typename traits::value_type)))),
+             typename traits::array_layout(arg_N0, arg_N1, arg_N2, arg_N3,
+                                           arg_N4, arg_N5, arg_N6, arg_N7),
+             check_input_args::yes) {
+    static_assert(traits::array_layout::is_extent_constructible,
+                  "Layout is not constructible from extent arguments. Use "
+                  "overload taking a layout object instead.");
+  }
+};
+
+/** \brief Temporary free function rank()
+ *         until a rank() member is implemented
+ *         in View itself
+ */
+template <typename D, class... P>
+KOKKOS_INLINE_FUNCTION constexpr unsigned rank(const View<D, P...>& V) {
+  return V.Rank;
+}  // Temporary until added to view
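A one-line sketch of the stopgap (extents arbitrary):

    Kokkos::View<double**> v("v", 10, 20);
    const unsigned r = Kokkos::rank(v);  // 2, same value as v.Rank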
+
+namespace Impl {
+
+template <typename ValueType, unsigned int Rank>
+struct RankDataType {
+  using type = typename RankDataType<ValueType, Rank - 1>::type*;
+};
+
+template <typename ValueType>
+struct RankDataType<ValueType, 0> {
+  using type = ValueType;
+};
+
+template <unsigned N, typename... Args>
+KOKKOS_FUNCTION std::enable_if_t<N == View<Args...>::Rank, View<Args...>>
+as_view_of_rank_n(View<Args...> v) {
+  return v;
+}
+
+// Placeholder implementation to compile generic code for DynRankView; should
+// never be called
+template <unsigned N, typename T, typename... Args>
+std::enable_if_t<
+    N != View<T, Args...>::Rank,
+    View<typename RankDataType<typename View<T, Args...>::value_type, N>::type,
+         Args...>>
+as_view_of_rank_n(View<T, Args...>) {
+  Kokkos::Impl::throw_runtime_exception(
+      "Trying to get at a View of the wrong rank");
+  return {};
+}
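For orientation only (these are internal Impl helpers): RankDataType rebuilds a pointer-decorated data type from a value type and a rank, and as_view_of_rank_n is the identity whenever the requested rank already matches:

    // needs <type_traits>
    static_assert(std::is_same_v<Kokkos::Impl::RankDataType<double, 2>::type,
                                 double**>);
    Kokkos::View<double**> v("v", 3, 4);
    auto same = Kokkos::Impl::as_view_of_rank_n<2>(v);  // returns v unchanged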
+
+template <typename Function, typename... Args>
+void apply_to_view_of_static_rank(Function&& f, View<Args...> a) {
+  f(a);
+}
+
+}  // namespace Impl
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class V, class... Args>
+using Subview =
+    typename Kokkos::Impl::ViewMapping<void /* deduce subview type from source
+                                               view traits */
+                                       ,
+                                       typename V::traits, Args...>::type;
+
+template <class D, class... P, class... Args>
+KOKKOS_INLINE_FUNCTION
+    typename Kokkos::Impl::ViewMapping<void /* deduce subview type from source
+                                               view traits */
+                                       ,
+                                       ViewTraits<D, P...>, Args...>::type
+    subview(const View<D, P...>& src, Args... args) {
+  static_assert(View<D, P...>::Rank == sizeof...(Args),
+                "subview requires one argument for each source View rank");
+
+  return typename Kokkos::Impl::ViewMapping<
+      void /* deduce subview type from source view traits */
+      ,
+      ViewTraits<D, P...>, Args...>::type(src, args...);
+}
+
+template <class MemoryTraits, class D, class... P, class... Args>
+KOKKOS_INLINE_FUNCTION typename Kokkos::Impl::ViewMapping<
+    void /* deduce subview type from source view traits */
+    ,
+    ViewTraits<D, P...>, Args...>::template apply<MemoryTraits>::type
+subview(const View<D, P...>& src, Args... args) {
+  static_assert(View<D, P...>::Rank == sizeof...(Args),
+                "subview requires one argument for each source View rank");
+
+  return typename Kokkos::Impl::ViewMapping<
+      void /* deduce subview type from source view traits */
+      ,
+      ViewTraits<D, P...>,
+      Args...>::template apply<MemoryTraits>::type(src, args...);
+}
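A hedged usage sketch (extents and slice arguments illustrative); per the static_asserts, exactly one argument is supplied per source rank, with integral arguments dropping a rank and ranges keeping it:

    Kokkos::View<double**> m("m", 8, 8);
    auto row3  = Kokkos::subview(m, 3, Kokkos::ALL);  // rank-1 view of row 3
    auto block = Kokkos::subview(m, std::make_pair(2, 5),
                                 Kokkos::ALL);        // rank-2, 3 x 8 block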
+
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator==(const View<LT, LP...>& lhs,
+                                       const View<RT, RP...>& rhs) {
+  // Same data, layout, dimensions
+  using lhs_traits = ViewTraits<LT, LP...>;
+  using rhs_traits = ViewTraits<RT, RP...>;
+
+  return std::is_same<typename lhs_traits::const_value_type,
+                      typename rhs_traits::const_value_type>::value &&
+         std::is_same<typename lhs_traits::array_layout,
+                      typename rhs_traits::array_layout>::value &&
+         std::is_same<typename lhs_traits::memory_space,
+                      typename rhs_traits::memory_space>::value &&
+         unsigned(lhs_traits::rank) == unsigned(rhs_traits::rank) &&
+         lhs.data() == rhs.data() && lhs.span() == rhs.span() &&
+         lhs.extent(0) == rhs.extent(0) && lhs.extent(1) == rhs.extent(1) &&
+         lhs.extent(2) == rhs.extent(2) && lhs.extent(3) == rhs.extent(3) &&
+         lhs.extent(4) == rhs.extent(4) && lhs.extent(5) == rhs.extent(5) &&
+         lhs.extent(6) == rhs.extent(6) && lhs.extent(7) == rhs.extent(7);
+}
+
+template <class LT, class... LP, class RT, class... RP>
+KOKKOS_INLINE_FUNCTION bool operator!=(const View<LT, LP...>& lhs,
+                                       const View<RT, RP...>& rhs) {
+  return !(operator==(lhs, rhs));
+}
+
+} /* namespace Kokkos */
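Note the comparison is shallow: two Views compare equal when they alias the same allocation with matching value type, layout, memory space, rank, span, and extents; element values are never inspected. A sketch:

    Kokkos::View<int*> a("a", 10);
    Kokkos::View<int*> b = a;       // shares a's allocation
    Kokkos::View<int*> c("c", 10);  // same shape, distinct allocation
    // a == b is true (same data() and extents); a == c is false even if the
    // element values happen to agree.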
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+inline void shared_allocation_tracking_disable() {
+  Kokkos::Impl::SharedAllocationRecord<void, void>::tracking_disable();
+}
+
+inline void shared_allocation_tracking_enable() {
+  Kokkos::Impl::SharedAllocationRecord<void, void>::tracking_enable();
+}
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class Specialize, typename A, typename B>
+struct CommonViewValueType;
+
+template <typename A, typename B>
+struct CommonViewValueType<void, A, B> {
+  using value_type = std::common_type_t<A, B>;
+};
+
+template <class Specialize, class ValueType>
+struct CommonViewAllocProp;
+
+template <class ValueType>
+struct CommonViewAllocProp<void, ValueType> {
+  using value_type        = ValueType;
+  using scalar_array_type = ValueType;
+
+  template <class... Views>
+  KOKKOS_INLINE_FUNCTION CommonViewAllocProp(const Views&...) {}
+};
+
+template <class... Views>
+struct DeduceCommonViewAllocProp;
+
+// Base case must provide types for:
+// 1. specialize  2. value_type  3. is_view  4. prop_type
+template <class FirstView>
+struct DeduceCommonViewAllocProp<FirstView> {
+  using specialize = typename FirstView::traits::specialize;
+
+  using value_type = typename FirstView::traits::value_type;
+
+  enum : bool { is_view = is_view<FirstView>::value };
+
+  using prop_type = CommonViewAllocProp<specialize, value_type>;
+};
+
+template <class FirstView, class... NextViews>
+struct DeduceCommonViewAllocProp<FirstView, NextViews...> {
+  using NextTraits = DeduceCommonViewAllocProp<NextViews...>;
+
+  using first_specialize = typename FirstView::traits::specialize;
+  using first_value_type = typename FirstView::traits::value_type;
+
+  enum : bool { first_is_view = is_view<FirstView>::value };
+
+  using next_specialize = typename NextTraits::specialize;
+  using next_value_type = typename NextTraits::value_type;
+
+  enum : bool { next_is_view = NextTraits::is_view };
+
+  // common types
+
+  // determine specialize type
+  // if first and next specialize differ and both are non-void, error out
+  static_assert(!(!std::is_same<first_specialize, next_specialize>::value &&
+                  !std::is_void<first_specialize>::value &&
+                  !std::is_void<next_specialize>::value),
+                "Kokkos DeduceCommonViewAllocProp ERROR: Only one non-void "
+                "specialize trait allowed");
+
+  // otherwise choose non-void specialize if either/both are non-void
+  using specialize = std::conditional_t<
+      std::is_same<first_specialize, next_specialize>::value, first_specialize,
+      std::conditional_t<(std::is_void<first_specialize>::value &&
+                          !std::is_void<next_specialize>::value),
+                         next_specialize, first_specialize>>;
+
+  using value_type = typename CommonViewValueType<specialize, first_value_type,
+                                                  next_value_type>::value_type;
+
+  enum : bool { is_view = (first_is_view && next_is_view) };
+
+  using prop_type = CommonViewAllocProp<specialize, value_type>;
+};
+
+}  // end namespace Impl
+
+template <class... Views>
+using DeducedCommonPropsType =
+    typename Impl::DeduceCommonViewAllocProp<Views...>::prop_type;
+
+// This function is required in certain scenarios where users customize
+// Kokkos View internals. One example is dynamic-length embedded ensemble
+// types. The function is used to propagate necessary information
+// (like the ensemble size) when creating new views.
+// However, most of the time it is called with a single view.
+// Furthermore, the propagated information is not just for view allocations.
+// The functionality provided by common_view_alloc_prop is equivalent to
+// propagating accessors in mdspan, a mechanism that will eventually replace
+// this clunky approach once Kokkos is mdspan based.
+// TODO: get rid of this when we have mdspan
+template <class... Views>
+KOKKOS_INLINE_FUNCTION DeducedCommonPropsType<Views...> common_view_alloc_prop(
+    Views const&... views) {
+  return DeducedCommonPropsType<Views...>(views...);
+}
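A minimal sketch for plain Views, where specialize is void and the deduced property reduces to the common value type:

    Kokkos::View<float*>  x("x", 100);
    Kokkos::View<double*> y("y", 100);
    auto prop = Kokkos::common_view_alloc_prop(x, y);
    // std::common_type_t<float, double> is double; needs <type_traits>:
    static_assert(std::is_same_v<decltype(prop)::value_type, double>);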
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+namespace Kokkos {
+namespace Impl {
+
+template <class T>
+using is_view KOKKOS_DEPRECATED_WITH_COMMENT("Use Kokkos::is_view instead!") =
+    Kokkos::is_view<T>;
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+#endif
+
+#include <impl/Kokkos_ViewUniformType.hpp>
+#include <impl/Kokkos_Atomic_View.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_VIEW_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_WorkGraphPolicy.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_WorkGraphPolicy.hpp
new file mode 100644 (file)
index 0000000..fafd825
--- /dev/null
@@ -0,0 +1,278 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_WORKGRAPHPOLICY_HPP
+#define KOKKOS_WORKGRAPHPOLICY_HPP
+
+#include <impl/Kokkos_AnalyzePolicy.hpp>
+#include <Kokkos_Crs.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class functor_type, class execution_space, class... policy_args>
+class WorkGraphExec;
+
+}
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+template <class... Properties>
+class WorkGraphPolicy : public Kokkos::Impl::PolicyTraits<Properties...> {
+ public:
+  using execution_policy = WorkGraphPolicy<Properties...>;
+  using self_type        = WorkGraphPolicy<Properties...>;
+  using traits           = Kokkos::Impl::PolicyTraits<Properties...>;
+  using index_type       = typename traits::index_type;
+  using member_type      = index_type;
+  using execution_space  = typename traits::execution_space;
+  using memory_space     = typename execution_space::memory_space;
+  using graph_type = Kokkos::Crs<index_type, execution_space, void, index_type>;
+
+  enum : std::int32_t {
+    END_TOKEN       = -1,
+    BEGIN_TOKEN     = -2,
+    COMPLETED_TOKEN = -3
+  };
+
+ private:
+  using ints_type = Kokkos::View<std::int32_t*, memory_space>;
+
+  // Let N = m_graph.numRows(), the total work
+  // m_queue[  0 ..   N-1] = the ready queue
+  // m_queue[  N .. 2*N-1] = the waiting queue counts
+  // m_queue[2*N .. 2*N+1] = the ready queue begin/end hints
+
+  graph_type const m_graph;
+  ints_type m_queue;
+
+  KOKKOS_INLINE_FUNCTION
+  void push_work(const std::int32_t w) const noexcept {
+    const std::int32_t N = m_graph.numRows();
+
+    std::int32_t volatile* const ready_queue = &m_queue[0];
+    std::int32_t volatile* const end_hint    = &m_queue[2 * N + 1];
+
+    // Push work to end of queue
+    const std::int32_t j = atomic_fetch_add(end_hint, 1);
+
+    if ((N <= j) || (END_TOKEN != atomic_exchange(ready_queue + j, w))) {
+      // ERROR: past the end of queue or did not replace END_TOKEN
+      Kokkos::abort("WorkGraphPolicy push_work error");
+    }
+
+    memory_fence();
+  }
+
+ public:
+  /**\brief  Attempt to pop the work item at the head of the queue.
+   *
+   *  Find entry 'i' such that
+   *    ( m_queue[i] != BEGIN_TOKEN ) AND
+   *    ( i == 0 OR m_queue[i-1] == BEGIN_TOKEN )
+   *  if found then
+   *    increment begin hint
+   *    return atomic_exchange( m_queue[i] , BEGIN_TOKEN )
+   *  else if i < total work
+   *    return END_TOKEN
+   *  else
+   *    return COMPLETED_TOKEN
+   *
+   */
+  KOKKOS_INLINE_FUNCTION
+  std::int32_t pop_work() const noexcept {
+    const std::int32_t N = m_graph.numRows();
+
+    std::int32_t volatile* const ready_queue = &m_queue[0];
+    std::int32_t volatile* const begin_hint  = &m_queue[2 * N];
+
+    // The begin hint is guaranteed to be less than or equal to the
+    // actual begin location in the queue.
+
+    for (std::int32_t i = *begin_hint; i < N; ++i) {
+      const std::int32_t w = ready_queue[i];
+
+      if (w == END_TOKEN) {
+        return END_TOKEN;
+      }
+
+      if ((w != BEGIN_TOKEN) &&
+          (w == atomic_compare_exchange(ready_queue + i, w,
+                                        (std::int32_t)BEGIN_TOKEN))) {
+        // Attempt to claim ready work index succeeded,
+        // update the hint and return work index
+        atomic_increment(begin_hint);
+        return w;
+      }
+      // arrive here when ready_queue[i] == BEGIN_TOKEN
+    }
+
+    return COMPLETED_TOKEN;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void completed_work(std::int32_t w) const noexcept {
+    // Make sure the completed work function's memory accesses are flushed
+    // before dependent work items observe the completion.
+    Kokkos::memory_fence();
+
+    const std::int32_t N = m_graph.numRows();
+
+    std::int32_t volatile* const count_queue = &m_queue[N];
+
+    const std::int32_t B = m_graph.row_map(w);
+    const std::int32_t E = m_graph.row_map(w + 1);
+
+    for (std::int32_t i = B; i < E; ++i) {
+      const std::int32_t j = m_graph.entries(i);
+      if (1 == atomic_fetch_add(count_queue + j, -1)) {
+        push_work(j);
+      }
+    }
+  }
+
+  struct TagInit {};
+  struct TagCount {};
+  struct TagReady {};
+
+  /**\brief  Initialize queue
+   *
+   *  m_queue[0..N-1] = END_TOKEN, the ready queue
+   *  m_queue[N..2*N-1] = 0, the waiting count queue
+   *  m_queue[2*N..2*N+1] = 0, begin/end hints for ready queue
+   */
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const TagInit, int i) const noexcept {
+    m_queue[i] = i < m_graph.numRows() ? END_TOKEN : 0;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const TagCount, int i) const noexcept {
+    std::int32_t volatile* const count_queue = &m_queue[m_graph.numRows()];
+
+    atomic_increment(count_queue + m_graph.entries[i]);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const TagReady, int w) const noexcept {
+    std::int32_t const* const count_queue = &m_queue[m_graph.numRows()];
+
+    if (0 == count_queue[w]) push_work(w);
+  }
+
+  execution_space space() const { return execution_space(); }
+
+  WorkGraphPolicy(const graph_type& arg_graph)
+      : m_graph(arg_graph),
+        m_queue(view_alloc("queue", WithoutInitializing),
+                arg_graph.numRows() * 2 + 2) {
+    {  // Initialize
+      using policy_type  = RangePolicy<std::int32_t, execution_space, TagInit>;
+      using closure_type = Kokkos::Impl::ParallelFor<self_type, policy_type>;
+      const closure_type closure(*this, policy_type(0, m_queue.size()));
+      closure.execute();
+      execution_space().fence(
+          "Kokkos::WorkGraphPolicy::WorkGraphPolicy: fence after executing "
+          "graph init");
+    }
+
+    {  // execute-after counts
+      using policy_type  = RangePolicy<std::int32_t, execution_space, TagCount>;
+      using closure_type = Kokkos::Impl::ParallelFor<self_type, policy_type>;
+      const closure_type closure(*this, policy_type(0, m_graph.entries.size()));
+      closure.execute();
+      execution_space().fence(
+          "Kokkos::WorkGraphPolicy::WorkGraphPolicy: fence after executing "
+          "graph count");
+    }
+
+    {  // Scheduling ready tasks
+      using policy_type  = RangePolicy<std::int32_t, execution_space, TagReady>;
+      using closure_type = Kokkos::Impl::ParallelFor<self_type, policy_type>;
+      const closure_type closure(*this, policy_type(0, m_graph.numRows()));
+      closure.execute();
+      execution_space().fence(
+          "Kokkos::WorkGraphPolicy::WorkGraphPolicy: fence after executing "
+          "readied graph");
+    }
+  }
+};
+
+}  // namespace Kokkos
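A hedged end-to-end sketch (graph construction elided): each row of the Crs graph is one work item, and the entries of row w are the items whose waiting counts drop once w completes:

    using policy_t = Kokkos::WorkGraphPolicy<Kokkos::IndexType<std::int32_t>>;
    policy_t::graph_type graph;
    // Fill graph.row_map / graph.entries so that the entries of row w list
    // the successors of work item w (omitted here).
    Kokkos::parallel_for("dag", policy_t(graph),
                         KOKKOS_LAMBDA(std::int32_t w) {
                           // execute work item w; all of its predecessors
                           // are guaranteed complete
                         });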
+
+#ifdef KOKKOS_ENABLE_SERIAL
+#include "Serial/Kokkos_Serial_WorkGraphPolicy.hpp"
+#endif
+
+#ifdef KOKKOS_ENABLE_OPENMP
+#include "OpenMP/Kokkos_OpenMP_WorkGraphPolicy.hpp"
+#endif
+
+#ifdef KOKKOS_ENABLE_CUDA
+#include "Cuda/Kokkos_Cuda_WorkGraphPolicy.hpp"
+#endif
+
+#ifdef KOKKOS_ENABLE_HIP
+#include "HIP/Kokkos_HIP_WorkGraphPolicy.hpp"
+#endif
+
+#ifdef KOKKOS_ENABLE_THREADS
+#include "Threads/Kokkos_Threads_WorkGraphPolicy.hpp"
+#endif
+
+#ifdef KOKKOS_ENABLE_HPX
+#include "HPX/Kokkos_HPX_WorkGraphPolicy.hpp"
+#endif
+
+#endif /* #ifndef KOKKOS_WORKGRAPHPOLICY_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Kokkos_hwloc.hpp b/bundled/kokkos-3.7.00/core/src/Kokkos_hwloc.hpp
new file mode 100644 (file)
index 0000000..abbec54
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+#ifndef KOKKOS_HWLOC_HPP
+#define KOKKOS_HWLOC_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <utility>
+
+namespace Kokkos {
+
+/** \brief  Minimal subset of logical 'hwloc' functionality available
+ *          from http://www.open-mpi.org/projects/hwloc/.
+ *
+ *  The calls are NOT thread safe in order to avoid mutexes,
+ *  memory allocations, or other actions which could give the
+ *  runtime system an opportunity to migrate the threads or
+ *  touch allocated memory during the function calls.
+ *
+ *  All calls to these functions should be performed by a thread
+ *  when it has guaranteed exclusive access; e.g., for OpenMP
+ *  within a 'critical' region.
+ */
+namespace hwloc {
+
+/** \brief  Query if hwloc is available */
+bool available();
+
+/** \brief  Query number of available NUMA regions.
+ *          This will be less than the hardware capacity
+ *          if the MPI process is pinned to a NUMA region.
+ */
+unsigned get_available_numa_count();
+
+/** \brief  Query number of available cores per NUMA region.
+ *          This will be less than the hardware capacity
+ *          if the MPI process is pinned to a set of cores.
+ */
+unsigned get_available_cores_per_numa();
+
+/** \brief  Query number of available "hard" threads per core; i.e.,
+ * hyperthreads */
+unsigned get_available_threads_per_core();
+
+} /* namespace hwloc */
+} /* namespace Kokkos */
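A sketch of the query side, called from a serial region as the thread-safety note above requires (reported values are machine-dependent, and meaningful only when hwloc support is enabled):

    if (Kokkos::hwloc::available()) {
      const unsigned numa    = Kokkos::hwloc::get_available_numa_count();
      const unsigned cores   = Kokkos::hwloc::get_available_cores_per_numa();
      const unsigned threads = Kokkos::hwloc::get_available_threads_per_core();
      std::cout << numa * cores * threads        // needs <iostream>
                << " available hardware threads\n";
    }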
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+// Internal functions for binding persistent spawned threads.
+
+namespace Kokkos {
+namespace hwloc {
+
+/** \brief  Recommend mapping of threads onto cores.
+ *
+ * If thread_count == 0 then choose and set a value.
+ * If use_numa_count == 0 then choose and set a value.
+ * If use_cores_per_numa == 0 then choose and set a value.
+ *
+ * Return 0 if asynchronous.
+ * Return 1 if synchronous; in that case threads_coord[0] is the process core.
+ */
+unsigned thread_mapping(const char* const label, const bool allow_async,
+                        unsigned& thread_count, unsigned& use_numa_count,
+                        unsigned& use_cores_per_numa,
+                        std::pair<unsigned, unsigned> threads_coord[]);
+
+/** \brief  Query core-coordinate of the current thread
+ *          with respect to the core_topology.
+ *
+ *  As long as the thread is running within the
+ *  process binding, the following conditions hold:
+ *
+ *  core_coordinate.first  < core_topology.first
+ *  core_coordinate.second < core_topology.second
+ */
+std::pair<unsigned, unsigned> get_this_thread_coordinate();
+
+/** \brief  Bind the current thread to a core. */
+bool bind_this_thread(const std::pair<unsigned, unsigned>);
+
+/** \brief Can hwloc bind threads? */
+bool can_bind_threads();
+
+/** \brief  Bind the current thread to one of the cores in the list.
+ *          Set that entry to (~0,~0) and return the index.
+ *          If binding fails return ~0.
+ */
+unsigned bind_this_thread(const unsigned coordinate_count,
+                          std::pair<unsigned, unsigned> coordinate[]);
+
+/** \brief  Unbind the current thread back to the original process binding */
+bool unbind_this_thread();
+
+} /* namespace hwloc */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_HWLOC_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC.cpp b/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC.cpp
new file mode 100644 (file)
index 0000000..f321609
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACC_Instance.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+#include <ostream>
+
+Kokkos::Experimental::OpenACC::OpenACC()
+    : m_space_instance(Impl::OpenACCInternal::singleton()) {}
+
+void Kokkos::Experimental::OpenACC::impl_initialize(
+    InitializationSettings const& settings) {
+  Impl::OpenACCInternal::singleton()->initialize(settings);
+}
+
+void Kokkos::Experimental::OpenACC::impl_finalize() {
+  Impl::OpenACCInternal::singleton()->finalize();
+}
+
+bool Kokkos::Experimental::OpenACC::impl_is_initialized() {
+  return Impl::OpenACCInternal::singleton()->is_initialized();
+}
+
+void Kokkos::Experimental::OpenACC::print_configuration(std::ostream& os,
+                                                        bool verbose) const {
+  os << "macro KOKKOS_ENABLE_OPENACC is defined\n";  // FIXME_OPENACC
+  m_space_instance->print_configuration(os, verbose);
+}
+
+void Kokkos::Experimental::OpenACC::fence(std::string const& name) const {
+  Impl::OpenACCInternal::singleton()->fence(name);
+}
+
+void Kokkos::Experimental::OpenACC::impl_static_fence(std::string const& name) {
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<
+      Kokkos::Experimental::OpenACC>(
+      name,
+      Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+          GlobalDeviceSynchronization,
+      [&]() { acc_wait_all(); });
+}
+
+uint32_t Kokkos::Experimental::OpenACC::impl_instance_id() const noexcept {
+  return m_space_instance->instance_id();
+}
+
+namespace Kokkos {
+namespace Impl {
+int g_openacc_space_factory_initialized =
+    initialize_space_factory<Experimental::OpenACC>("170_OpenACC");
+}  // namespace Impl
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC.hpp b/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC.hpp
new file mode 100644 (file)
index 0000000..3ad5905
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+
+#ifndef KOKKOS_OPENACC_HPP
+#define KOKKOS_OPENACC_HPP
+
+#include <OpenACC/Kokkos_OpenACCSpace.hpp>
+#include <Kokkos_Concepts.hpp>
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_ScratchSpace.hpp>
+#include <impl/Kokkos_InitializationSettings.hpp>
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <OpenACC/Kokkos_OpenACC_Traits.hpp>
+
+#include <openacc.h>
+
+#include <iosfwd>
+#include <string>
+
+namespace Kokkos::Experimental::Impl {
+class OpenACCInternal;
+}
+
+namespace Kokkos::Experimental {
+
+class OpenACC {
+  Impl::OpenACCInternal* m_space_instance = nullptr;
+
+ public:
+  using execution_space = OpenACC;
+  using memory_space    = OpenACCSpace;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+
+  using array_layout = LayoutLeft;
+  using size_type    = memory_space::size_type;
+
+  using scratch_memory_space = ScratchMemorySpace<OpenACC>;
+
+  OpenACC();
+
+  static void impl_initialize(InitializationSettings const& settings);
+  static void impl_finalize();
+  static bool impl_is_initialized();
+
+  void print_configuration(std::ostream& os, bool verbose = false) const;
+
+  void fence(std::string const& name =
+                 "Kokkos::OpenACC::fence(): Unnamed Instance Fence") const;
+  static void impl_static_fence(std::string const& name);
+
+  static char const* name() { return "OpenACC"; }
+  static int concurrency() { return 256000; }  // FIXME_OPENACC
+  static bool in_parallel() { return acc_on_device(acc_device_not_host); }
+  uint32_t impl_instance_id() const noexcept;
+};
+
+}  // namespace Kokkos::Experimental
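A hedged sketch of the backend in use (requires a build with the OpenACC backend enabled; extent illustrative):

    const int n = 1000;
    Kokkos::View<double*, Kokkos::Experimental::OpenACCSpace> v("v", n);
    Kokkos::parallel_for(
        Kokkos::RangePolicy<Kokkos::Experimental::OpenACC>(0, n),
        KOKKOS_LAMBDA(const int i) { v(i) = i; });
    Kokkos::Experimental::OpenACC().fence();  // instance fence declared above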
+
+template <>
+struct Kokkos::Tools::Experimental::DeviceTypeTraits<
+    ::Kokkos::Experimental::OpenACC> {
+  static constexpr DeviceType id =
+      ::Kokkos::Profiling::Experimental::DeviceType::OpenACC;
+  // FIXME_OPENACC: Need to return the device id from the execution space
+  // instance. In fact, acc_get_device_num() will return the same value as the
+  // device id from the execution space instance except for the host fallback
+  // case, where the device id may need to be updated with the value of
+  // acc_get_device_num().
+  static int device_id(const Kokkos::Experimental::OpenACC&) {
+    using Kokkos::Experimental::Impl::OpenACC_Traits;
+    return acc_get_device_num(OpenACC_Traits::dev_type);
+  }
+};
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACCSpace.cpp b/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACCSpace.cpp
new file mode 100644 (file)
index 0000000..bc2ba18
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACCSpace.hpp>
+#include <impl/Kokkos_MemorySpace.hpp>
+#include <impl/Kokkos_Profiling_Interface.hpp>
+
+#include <openacc.h>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+void *Kokkos::Experimental::OpenACCSpace::allocate(
+    const Kokkos::Experimental::OpenACC &exec_space,
+    const size_t arg_alloc_size) const {
+  return allocate(exec_space, "[unlabeled]", arg_alloc_size);
+}
+
+void *Kokkos::Experimental::OpenACCSpace::allocate(
+    const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+
+void *Kokkos::Experimental::OpenACCSpace::allocate(
+    const Kokkos::Experimental::OpenACC &exec_space, const char *arg_label,
+    const size_t arg_alloc_size, const size_t arg_logical_size) const {
+  return impl_allocate(exec_space, arg_label, arg_alloc_size, arg_logical_size);
+}
+
+void *Kokkos::Experimental::OpenACCSpace::allocate(
+    const char *arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size) const {
+  return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+
+void *Kokkos::Experimental::OpenACCSpace::impl_allocate(
+    const Kokkos::Experimental::OpenACC &exec_space, const char *arg_label,
+    const size_t arg_alloc_size, const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  static_assert(sizeof(void *) == sizeof(uintptr_t),
+                "Error sizeof(void*) != sizeof(uintptr_t)");
+
+  void *ptr = nullptr;
+
+  // FIXME_OPENACC multiple device instances are not yet supported, and thus
+  // exec_space is ignored for now.
+  (void)exec_space;
+
+  ptr = acc_malloc(arg_alloc_size);
+
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+  }
+
+  return ptr;
+}
+
+void *Kokkos::Experimental::OpenACCSpace::impl_allocate(
+    const char *arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  static_assert(sizeof(void *) == sizeof(uintptr_t),
+                "Error sizeof(void*) != sizeof(uintptr_t)");
+
+  void *ptr = nullptr;
+
+  // [DEBUG] Disabled due to the synchronous behavior of the current
+  // implementation.
+  /*
+    OpenACC::impl_static_fence(
+        "Kokkos::OpenACCSpace::impl_allocate: Pre OpenACC Allocation");
+  */
+
+  ptr = acc_malloc(arg_alloc_size);
+
+  // [DEBUG] Disabled due to the synchronous behavior of the current
+  // implementation.
+  /*
+    OpenACC::impl_static_fence(
+        "Kokkos::OpenACCSpace::impl_allocate: Post OpenACC Allocation");
+  */
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+  }
+
+  return ptr;
+}
+
+void Kokkos::Experimental::OpenACCSpace::deallocate(
+    void *const arg_alloc_ptr, const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void Kokkos::Experimental::OpenACCSpace::deallocate(
+    const char *arg_label, void *const arg_alloc_ptr,
+    const size_t arg_alloc_size, const size_t arg_logical_size) const {
+  impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+
+void Kokkos::Experimental::OpenACCSpace::impl_deallocate(
+    const char *arg_label, void *const arg_alloc_ptr,
+    const size_t arg_alloc_size, const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+                                      reported_size);
+  }
+
+  if (arg_alloc_ptr) {
+    acc_free(arg_alloc_ptr);
+  }
+}
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#ifdef KOKKOS_ENABLE_DEBUG
+Kokkos::Impl::SharedAllocationRecord<void, void> SharedAllocationRecord<
+    Kokkos::Experimental::OpenACCSpace, void>::s_root_record;
+#endif
+
+Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::OpenACCSpace,
+                                     void>::~SharedAllocationRecord() {
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     (SharedAllocationRecord<void, void>::m_alloc_size -
+                      sizeof(SharedAllocationHeader)));
+}
+
+Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::OpenACCSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::OpenACCSpace &arg_space,
+        const std::string &arg_label, const size_t arg_alloc_size,
+        const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::OpenACCSpace,
+                                  void>::s_root_record,
+#endif
+          Impl::checked_allocation_with_header(arg_space, arg_label,
+                                               arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+  SharedAllocationHeader header;
+
+  this->base_t::_fill_host_accessible_header_info(header, arg_label);
+
+  Kokkos::Impl::DeepCopy<Experimental::OpenACCSpace, HostSpace>(
+      RecordBase::m_alloc_ptr, &header, sizeof(SharedAllocationHeader));
+  Kokkos::fence(
+      "SharedAllocationRecord<Kokkos::Experimental::OpenACCSpace, "
+      "void>::SharedAllocationRecord(): fence after copying header from "
+      "HostSpace");
+}
+
+//==============================================================================
+// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
+
+#include <impl/Kokkos_SharedAlloc_timpl.hpp>
+
+// To avoid additional compilation cost for something that's (mostly?) not
+// performance sensitive, we explicitly instantiate these CRTP base classes
+// here, where we have access to the associated *_timpl.hpp header files.
+template class Kokkos::Impl::HostInaccessibleSharedAllocationRecordCommon<
+    Kokkos::Experimental::OpenACCSpace>;
+template class Kokkos::Impl::SharedAllocationRecordCommon<
+    Kokkos::Experimental::OpenACCSpace>;
+
+// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
+//==============================================================================
diff --git a/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACCSpace.hpp b/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACCSpace.hpp
new file mode 100644 (file)
index 0000000..a7347e8
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#include <Kokkos_Macros.hpp>
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+static_assert(false,
+              "Including non-public Kokkos header files is not allowed.");
+#else
+KOKKOS_IMPL_WARNING("Including non-public Kokkos header files is not allowed.")
+#endif
+#endif
+
+#ifndef KOKKOS_OPENACC_SPACE_HPP
+#define KOKKOS_OPENACC_SPACE_HPP
+
+#include <Kokkos_Concepts.hpp>
+
+#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+
+#include <openacc.h>
+#include <iosfwd>
+
+namespace Kokkos::Experimental {
+
+class OpenACC;
+
+class OpenACCSpace {
+ public:
+  using memory_space    = OpenACCSpace;
+  using execution_space = OpenACC;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+
+  using size_type = size_t;
+
+  OpenACCSpace() = default;
+
+  /**\brief  Allocate untracked memory in the space */
+  void* allocate(const Kokkos::Experimental::OpenACC& exec_space,
+                 const size_t arg_alloc_size) const;
+  void* allocate(const Kokkos::Experimental::OpenACC& exec_space,
+                 const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+  void* allocate(const size_t arg_alloc_size) const;
+  void* allocate(const char* arg_label, const size_t arg_alloc_size,
+                 const size_t arg_logical_size = 0) const;
+
+  /**\brief  Deallocate untracked memory in the space */
+  void deallocate(void* const arg_alloc_ptr, const size_t arg_alloc_size) const;
+  void deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                  const size_t arg_alloc_size,
+                  const size_t arg_logical_size = 0) const;
+
+  static constexpr char const* name() { return "OpenACCSpace"; }
+
+ private:
+  void* impl_allocate(const Kokkos::Experimental::OpenACC& exec_space,
+                      const char* arg_label, const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      const Kokkos::Tools::SpaceHandle =
+                          Kokkos::Tools::make_space_handle(name())) const;
+  void* impl_allocate(const char* arg_label, const size_t arg_alloc_size,
+                      const size_t arg_logical_size = 0,
+                      const Kokkos::Tools::SpaceHandle =
+                          Kokkos::Tools::make_space_handle(name())) const;
+  void impl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                       const size_t arg_alloc_size,
+                       const size_t arg_logical_size = 0,
+                       const Kokkos::Tools::SpaceHandle =
+                           Kokkos::Tools::make_space_handle(name())) const;
+};
+
+}  // namespace Kokkos::Experimental
+
+/*--------------------------------------------------------------------------*/
+
+template <>
+struct Kokkos::Impl::MemorySpaceAccess<Kokkos::HostSpace,
+                                       Kokkos::Experimental::OpenACCSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct Kokkos::Impl::MemorySpaceAccess<Kokkos::Experimental::OpenACCSpace,
+                                       Kokkos::HostSpace> {
+  enum : bool { assignable = false };
+  enum : bool { accessible = false };
+  enum : bool { deepcopy = true };
+};
+
+template <>
+struct Kokkos::Impl::MemorySpaceAccess<Kokkos::Experimental::OpenACCSpace,
+                                       Kokkos::Experimental::OpenACCSpace> {
+  enum : bool { assignable = true };
+  enum : bool { accessible = true };
+  enum : bool { deepcopy = true };
+};
+/*--------------------------------------------------------------------------*/
+
+template <>
+class Kokkos::Impl::SharedAllocationRecord<Kokkos::Experimental::OpenACCSpace,
+                                           void>
+    : public HostInaccessibleSharedAllocationRecordCommon<
+          Kokkos::Experimental::OpenACCSpace> {
+ private:
+  friend class HostInaccessibleSharedAllocationRecordCommon<
+      Kokkos::Experimental::OpenACCSpace>;
+  friend class SharedAllocationRecordCommon<Kokkos::Experimental::OpenACCSpace>;
+  friend Kokkos::Experimental::OpenACCSpace;
+
+  using base_t = HostInaccessibleSharedAllocationRecordCommon<
+      Kokkos::Experimental::OpenACCSpace>;
+  using RecordBase = SharedAllocationRecord<void, void>;
+
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+  /**\brief  Root record for tracked allocations from this OpenACCSpace
+   * instance */
+  static RecordBase s_root_record;
+
+  const Kokkos::Experimental::OpenACCSpace m_space;
+
+ protected:
+  ~SharedAllocationRecord();
+  SharedAllocationRecord() = default;
+
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(
+      const ExecutionSpace& /*exec_space*/,
+      const Kokkos::Experimental::OpenACCSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &deallocate)
+      : SharedAllocationRecord(arg_space, arg_label, arg_alloc_size,
+                               arg_dealloc) {}
+
+  SharedAllocationRecord(
+      const Kokkos::Experimental::OpenACCSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size,
+      const RecordBase::function_type arg_dealloc = &deallocate);
+
+ public:
+  KOKKOS_INLINE_FUNCTION static SharedAllocationRecord* allocate(
+      const Kokkos::Experimental::OpenACCSpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc_size) {
+    if (acc_on_device(acc_device_host)) {
+      return new SharedAllocationRecord(arg_space, arg_label, arg_alloc_size);
+    } else {
+      return nullptr;
+    }
+  }
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+// FIXME_OPENACC: Need to update the DeepCopy implementations below to support
+// multiple execution space instances.
+// The current OpenACC backend implementation assumes that there is only one
+// device execution space instance, and all the device operations (e.g., memory
+// transfers, kernel launches, etc.) are implemented to be synchronous, which
+// does not violate the Kokkos execution semantics with the single execution
+// space instance.
+template <class ExecutionSpace>
+struct Kokkos::Impl::DeepCopy<Kokkos::Experimental::OpenACCSpace,
+                              Kokkos::Experimental::OpenACCSpace,
+                              ExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    // The behavior of acc_memcpy_device when the bytes argument is zero is
+    // clarified only in the latest OpenACC specification (V3.2), so the
+    // value check below is added as a safeguard. (The current NVHPC (V22.5)
+    // supports OpenACC V2.7.)
+    if (n > 0) acc_memcpy_device(dst, const_cast<void*>(src), n);
+  }
+  DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+    exec.fence();
+    if (n > 0) acc_memcpy_device(dst, const_cast<void*>(src), n);
+  }
+};
+
+template <class ExecutionSpace>
+struct Kokkos::Impl::DeepCopy<Kokkos::Experimental::OpenACCSpace,
+                              Kokkos::HostSpace, ExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    if (n > 0) acc_memcpy_to_device(dst, const_cast<void*>(src), n);
+  }
+  DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+    exec.fence();
+    if (n > 0) acc_memcpy_to_device(dst, const_cast<void*>(src), n);
+  }
+};
+
+template <class ExecutionSpace>
+struct Kokkos::Impl::DeepCopy<
+    Kokkos::HostSpace, Kokkos::Experimental::OpenACCSpace, ExecutionSpace> {
+  DeepCopy(void* dst, const void* src, size_t n) {
+    if (n > 0) acc_memcpy_from_device(dst, const_cast<void*>(src), n);
+  }
+  DeepCopy(const ExecutionSpace& exec, void* dst, const void* src, size_t n) {
+    exec.fence();
+    if (n > 0) acc_memcpy_from_device(dst, const_cast<void*>(src), n);
+  }
+};
+
+#endif
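For context, a minimal sketch (not part of the patch) of how these DeepCopy
specializations are reached: Kokkos::deep_copy between HostSpace and
OpenACCSpace views dispatches to the structs above; the view length n is an
assumed placeholder.

    #include <Kokkos_Core.hpp>

    void sketch_deep_copy(int n) {
      Kokkos::View<double*, Kokkos::Experimental::OpenACCSpace> dev("dev", n);
      Kokkos::View<double*, Kokkos::HostSpace> host("host", n);
      Kokkos::deep_copy(dev, host);  // -> acc_memcpy_to_device above
      Kokkos::deep_copy(host, dev);  // -> acc_memcpy_from_device above
    }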
diff --git a/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC_Instance.cpp b/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC_Instance.cpp
new file mode 100644 (file)
index 0000000..15d3880
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+
+#include <OpenACC/Kokkos_OpenACC_Instance.hpp>
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACC_Traits.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+#include <impl/Kokkos_DeviceManagement.hpp>
+
+#include <openacc.h>
+
+#include <iostream>
+
+namespace Kokkos {
+bool show_warnings() noexcept;
+}
+
+Kokkos::Experimental::Impl::OpenACCInternal*
+Kokkos::Experimental::Impl::OpenACCInternal::singleton() {
+  static OpenACCInternal self;
+  return &self;
+}
+
+void Kokkos::Experimental::Impl::OpenACCInternal::initialize(
+    InitializationSettings const& settings) {
+  if (OpenACC_Traits::may_fallback_to_host &&
+      acc_get_num_devices(OpenACC_Traits::dev_type) == 0 &&
+      !settings.has_device_id()) {
+    if (show_warnings()) {
+      std::cerr << "Warning: No GPU available for execution, falling back to"
+                   " using the host!"
+                << std::endl;
+    }
+    acc_set_device_type(acc_device_host);
+    // FIXME_OPENACC if multiple execution space instances are supported,
+    // device id variable should be explicitly set to the value returned by
+    // acc_get_device_num(acc_device_host).
+  } else {
+    using Kokkos::Impl::get_gpu;
+    int const dev_num = get_gpu(settings);
+    acc_set_device_num(dev_num, OpenACC_Traits::dev_type);
+  }
+  m_is_initialized = true;
+}
+
+void Kokkos::Experimental::Impl::OpenACCInternal::finalize() {
+  m_is_initialized = false;
+}
+
+bool Kokkos::Experimental::Impl::OpenACCInternal::is_initialized() const {
+  return m_is_initialized;
+}
+
+void Kokkos::Experimental::Impl::OpenACCInternal::print_configuration(
+    std::ostream& os, bool /*verbose*/) const {
+  os << "Using OpenACC\n";  // FIXME_OPENACC
+}
+
+void Kokkos::Experimental::Impl::OpenACCInternal::fence(
+    std::string const& name) const {
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<
+      Kokkos::Experimental::OpenACC>(
+      name,
+      Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{instance_id()},
+      [&]() {
+        // acc_wait_all() is intentionally disabled here because the current
+        // parallel construct implementations are synchronous:
+        // acc_wait_all();
+      });
+}
+
+uint32_t Kokkos::Experimental::Impl::OpenACCInternal::instance_id() const
+    noexcept {
+  return Kokkos::Tools::Experimental::Impl::idForInstance<
+      Kokkos::Experimental::OpenACC>(reinterpret_cast<uintptr_t>(this));
+}
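A hedged sketch (not part of the patch) of how the fallback logic in
initialize() above is driven from user code; it assumes an OpenACC-enabled
build, and the device id 1 is an assumed placeholder:

    #include <Kokkos_Core.hpp>

    // Either: no device id requested. If the build may fall back to the host
    // and no GPU is found, initialize() selects acc_device_host and warns.
    Kokkos::initialize();

    // Or: an explicit device id. get_gpu(settings) yields 1 and the OpenACC
    // device number is set accordingly.
    Kokkos::initialize(Kokkos::InitializationSettings().set_device_id(1));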
diff --git a/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC_Instance.hpp b/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC_Instance.hpp
new file mode 100644 (file)
index 0000000..cb69b4a
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENACC_INSTANCE_HPP
+#define KOKKOS_OPENACC_INSTANCE_HPP
+
+#include <impl/Kokkos_InitializationSettings.hpp>
+
+#include <cstdint>
+#include <iosfwd>
+#include <string>
+
+namespace Kokkos::Experimental::Impl {
+
+class OpenACCInternal {
+  bool m_is_initialized = false;
+
+  OpenACCInternal()                       = default;
+  OpenACCInternal(const OpenACCInternal&) = default;
+  OpenACCInternal& operator=(const OpenACCInternal&) = default;
+
+ public:
+  static OpenACCInternal* singleton();
+
+  void initialize(InitializationSettings const& settings);
+  void finalize();
+  bool is_initialized() const;
+
+  void print_configuration(std::ostream& os, bool verbose = false) const;
+
+  void fence(std::string const& name) const;
+
+  uint32_t instance_id() const noexcept;
+};
+
+}  // namespace Kokkos::Experimental::Impl
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC_Traits.hpp b/bundled/kokkos-3.7.00/core/src/OpenACC/Kokkos_OpenACC_Traits.hpp
new file mode 100644 (file)
index 0000000..f9451ec
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENACC_TRAITS_HPP
+#define KOKKOS_OPENACC_TRAITS_HPP
+
+#include <openacc.h>
+
+namespace Kokkos::Experimental::Impl {
+
+struct OpenACC_Traits {
+#if defined(KOKKOS_ARCH_PASCAL) || defined(KOKKOS_ARCH_VOLTA) || \
+    defined(KOKKOS_ARCH_AMPERE)
+  static constexpr acc_device_t dev_type     = acc_device_nvidia;
+  static constexpr bool may_fallback_to_host = false;
+#else
+  static constexpr acc_device_t dev_type     = acc_device_not_host;
+  static constexpr bool may_fallback_to_host = true;
+#endif
+};
+
+}  // namespace Kokkos::Experimental::Impl
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Instance.cpp b/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Instance.cpp
new file mode 100644 (file)
index 0000000..2397aa4
--- /dev/null
@@ -0,0 +1,492 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <cstdio>
+#include <cstdlib>
+
+#include <limits>
+#include <iostream>
+#include <vector>
+
+#include <Kokkos_Core.hpp>
+
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_CPUDiscovery.hpp>
+#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+int g_openmp_hardware_max_threads = 1;
+
+thread_local int t_openmp_hardware_id = 0;
+// FIXME_OPENMP we can remove this after we remove partition_master
+thread_local OpenMPInternal *t_openmp_instance = nullptr;
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+void OpenMPInternal::validate_partition_impl(const int nthreads,
+                                             int &num_partitions,
+                                             int &partition_size) {
+  if (nthreads == 1) {
+    num_partitions = 1;
+    partition_size = 1;
+  } else if (num_partitions < 1 && partition_size < 1) {
+    int idle = nthreads;
+    for (int np = 2; np <= nthreads; ++np) {
+      for (int ps = 1; ps <= nthreads / np; ++ps) {
+        if (nthreads - np * ps < idle) {
+          idle           = nthreads - np * ps;
+          num_partitions = np;
+          partition_size = ps;
+        }
+        if (idle == 0) {
+          break;
+        }
+      }
+    }
+  } else if (num_partitions < 1 && partition_size > 0) {
+    if (partition_size <= nthreads) {
+      num_partitions = nthreads / partition_size;
+    } else {
+      num_partitions = 1;
+      partition_size = nthreads;
+    }
+  } else if (num_partitions > 0 && partition_size < 1) {
+    if (num_partitions <= nthreads) {
+      partition_size = nthreads / num_partitions;
+    } else {
+      num_partitions = nthreads;
+      partition_size = 1;
+    }
+  } else if (num_partitions * partition_size > nthreads) {
+    int idle     = nthreads;
+    const int NP = num_partitions;
+    const int PS = partition_size;
+    for (int np = NP; np > 0; --np) {
+      for (int ps = PS; ps > 0; --ps) {
+        if ((np * ps <= nthreads) && (nthreads - np * ps < idle)) {
+          idle           = nthreads - np * ps;
+          num_partitions = np;
+          partition_size = ps;
+        }
+        if (idle == 0) {
+          break;
+        }
+      }
+    }
+  }
+}
+#endif
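As a worked example of the last branch above (the requested product exceeds
the thread pool; the concrete numbers are illustrative, not from the patch):

    int num_partitions = 3, partition_size = 4;  // 3 * 4 = 12 > 10 threads
    Kokkos::Impl::OpenMPInternal::validate_partition_impl(10, num_partitions,
                                                          partition_size);
    // The search minimizes idle threads subject to np * ps <= nthreads:
    // afterwards num_partitions == 3 and partition_size == 3
    // (9 of 10 threads used, 1 idle).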
+
+void OpenMPInternal::clear_thread_data() {
+  const size_t member_bytes =
+      sizeof(int64_t) *
+      HostThreadTeamData::align_to_int64(sizeof(HostThreadTeamData));
+
+  const int old_alloc_bytes =
+      m_pool[0] ? (member_bytes + m_pool[0]->scratch_bytes()) : 0;
+
+  OpenMP::memory_space space;
+
+#pragma omp parallel num_threads(m_pool_size)
+  {
+    const int rank = omp_get_thread_num();
+
+    if (nullptr != m_pool[rank]) {
+      m_pool[rank]->disband_pool();
+
+      space.deallocate(m_pool[rank], old_alloc_bytes);
+
+      m_pool[rank] = nullptr;
+    }
+  }
+  /* END #pragma omp parallel */
+}
+
+void OpenMPInternal::resize_thread_data(size_t pool_reduce_bytes,
+                                        size_t team_reduce_bytes,
+                                        size_t team_shared_bytes,
+                                        size_t thread_local_bytes) {
+  const size_t member_bytes =
+      sizeof(int64_t) *
+      HostThreadTeamData::align_to_int64(sizeof(HostThreadTeamData));
+
+  HostThreadTeamData *root = m_pool[0];
+
+  const size_t old_pool_reduce  = root ? root->pool_reduce_bytes() : 0;
+  const size_t old_team_reduce  = root ? root->team_reduce_bytes() : 0;
+  const size_t old_team_shared  = root ? root->team_shared_bytes() : 0;
+  const size_t old_thread_local = root ? root->thread_local_bytes() : 0;
+  const size_t old_alloc_bytes =
+      root ? (member_bytes + root->scratch_bytes()) : 0;
+
+  // Allocate if any of the old allocation sizes is too small:
+
+  const bool allocate = (old_pool_reduce < pool_reduce_bytes) ||
+                        (old_team_reduce < team_reduce_bytes) ||
+                        (old_team_shared < team_shared_bytes) ||
+                        (old_thread_local < thread_local_bytes);
+
+  if (allocate) {
+    if (pool_reduce_bytes < old_pool_reduce) {
+      pool_reduce_bytes = old_pool_reduce;
+    }
+    if (team_reduce_bytes < old_team_reduce) {
+      team_reduce_bytes = old_team_reduce;
+    }
+    if (team_shared_bytes < old_team_shared) {
+      team_shared_bytes = old_team_shared;
+    }
+    if (thread_local_bytes < old_thread_local) {
+      thread_local_bytes = old_thread_local;
+    }
+
+    const size_t alloc_bytes =
+        member_bytes +
+        HostThreadTeamData::scratch_size(pool_reduce_bytes, team_reduce_bytes,
+                                         team_shared_bytes, thread_local_bytes);
+
+    OpenMP::memory_space space;
+
+    memory_fence();
+
+#pragma omp parallel num_threads(m_pool_size)
+    {
+      const int rank = omp_get_thread_num();
+
+      if (nullptr != m_pool[rank]) {
+        m_pool[rank]->disband_pool();
+
+        space.deallocate(m_pool[rank], old_alloc_bytes);
+      }
+
+      void *ptr = nullptr;
+      try {
+        ptr = space.allocate(alloc_bytes);
+      } catch (
+          Kokkos::Experimental::RawMemoryAllocationFailure const &failure) {
+        // For now, just rethrow the error message the existing way
+        Kokkos::Impl::throw_runtime_exception(failure.get_error_message());
+      }
+
+      m_pool[rank] = new (ptr) HostThreadTeamData();
+
+      m_pool[rank]->scratch_assign(((char *)ptr) + member_bytes, alloc_bytes,
+                                   pool_reduce_bytes, team_reduce_bytes,
+                                   team_shared_bytes, thread_local_bytes);
+
+      memory_fence();
+    }
+    /* END #pragma omp parallel */
+
+    HostThreadTeamData::organize_pool(m_pool, m_pool_size);
+  }
+}
+
+OpenMPInternal &OpenMPInternal::singleton() {
+  static OpenMPInternal *self = nullptr;
+  if (self == nullptr) {
+    self = new OpenMPInternal(get_current_max_threads());
+  }
+
+  return *self;
+}
+
+int OpenMPInternal::get_current_max_threads() noexcept {
+  // Using omp_get_max_threads() is problematic in conjunction with hwloc on
+  // Intel: an initial call into the OpenMP runtime without a preceding
+  // parallel region sets a process mask for a single core. On entering the
+  // first parallel region the runtime then binds threads to other cores and
+  // makes the process mask the aggregate of the thread masks. The intent
+  // seems to be to make serial code run fast when compiled with OpenMP
+  // enabled but not actually using parallel regions, so
+  //   static int omp_max_threads = omp_get_max_threads();
+  // is avoided here; instead, count the threads inside a parallel region:
+
+  int count = 0;
+#pragma omp parallel
+  {
+#pragma omp atomic
+    ++count;
+  }
+  return count;
+}
+
+void OpenMPInternal::initialize(int thread_count) {
+  if (m_initialized) {
+    Kokkos::abort(
+        "Calling OpenMP::initialize after OpenMP::finalize is illegal\n");
+  }
+
+  if (omp_in_parallel()) {
+    std::string msg("Kokkos::OpenMP::initialize ERROR : in parallel");
+    Kokkos::Impl::throw_runtime_exception(msg);
+  }
+
+  {
+    if (Kokkos::show_warnings() && nullptr == std::getenv("OMP_PROC_BIND")) {
+      printf(
+          "Kokkos::OpenMP::initialize WARNING: OMP_PROC_BIND environment "
+          "variable not set\n");
+      printf(
+          "  In general, for best performance with OpenMP 4.0 or better set "
+          "OMP_PROC_BIND=spread and OMP_PLACES=threads\n");
+      printf("  For best performance with OpenMP 3.1 set OMP_PROC_BIND=true\n");
+      printf("  For unit testing set OMP_PROC_BIND=false\n");
+    }
+
+    OpenMP::memory_space space;
+
+    // Before any other call to OMP query the maximum number of threads
+    // and save the value for re-initialization unit testing.
+
+    Impl::g_openmp_hardware_max_threads = get_current_max_threads();
+
+    int process_num_threads = Impl::g_openmp_hardware_max_threads;
+
+    if (Kokkos::hwloc::available()) {
+      process_num_threads = Kokkos::hwloc::get_available_numa_count() *
+                            Kokkos::hwloc::get_available_cores_per_numa() *
+                            Kokkos::hwloc::get_available_threads_per_core();
+    }
+
+    // if thread_count  < 0, use g_openmp_hardware_max_threads;
+    // if thread_count == 0, set g_openmp_hardware_max_threads to
+    //                       process_num_threads;
+    // if thread_count  > 0, set g_openmp_hardware_max_threads to thread_count
+    if (thread_count < 0) {
+      thread_count = Impl::g_openmp_hardware_max_threads;
+    } else if (thread_count == 0) {
+      if (Impl::g_openmp_hardware_max_threads != process_num_threads) {
+        Impl::g_openmp_hardware_max_threads = process_num_threads;
+        omp_set_num_threads(Impl::g_openmp_hardware_max_threads);
+      }
+    } else {
+      if (Kokkos::show_warnings() && thread_count > process_num_threads) {
+        printf(
+            "Kokkos::OpenMP::initialize WARNING: You are likely "
+            "oversubscribing your CPU cores.\n");
+        printf("  process threads available : %3d,  requested thread : %3d\n",
+               process_num_threads, thread_count);
+      }
+      Impl::g_openmp_hardware_max_threads = thread_count;
+      omp_set_num_threads(Impl::g_openmp_hardware_max_threads);
+    }
+
+// setup thread local
+#pragma omp parallel num_threads(Impl::g_openmp_hardware_max_threads)
+    {
+      Impl::t_openmp_hardware_id = omp_get_thread_num();
+      Impl::SharedAllocationRecord<void, void>::tracking_enable();
+    }
+
+    auto &instance       = OpenMPInternal::singleton();
+    instance.m_pool_size = Impl::g_openmp_hardware_max_threads;
+
+    // New, unified host thread team data:
+    {
+      size_t pool_reduce_bytes  = 32 * thread_count;
+      size_t team_reduce_bytes  = 32 * thread_count;
+      size_t team_shared_bytes  = 1024 * thread_count;
+      size_t thread_local_bytes = 1024;
+
+      instance.resize_thread_data(pool_reduce_bytes, team_reduce_bytes,
+                                  team_shared_bytes, thread_local_bytes);
+    }
+  }
+
+  // Check for over-subscription
+  if (Kokkos::show_warnings() &&
+      (Impl::mpi_ranks_per_node() * long(thread_count) >
+       Impl::processors_per_node())) {
+    std::cerr << "Kokkos::OpenMP::initialize WARNING: You are likely "
+                 "oversubscribing your CPU cores."
+              << std::endl;
+    std::cerr << "                                    Detected: "
+              << Impl::processors_per_node() << " cores per node." << std::endl;
+    std::cerr << "                                    Detected: "
+              << Impl::mpi_ranks_per_node() << " MPI_ranks per node."
+              << std::endl;
+    std::cerr << "                                    Requested: "
+              << thread_count << " threads per process." << std::endl;
+  }
+  // Initialize the array used for arbitrarily sized atomics
+  init_lock_array_host_space();
+
+  m_initialized = true;
+}
+
+void OpenMPInternal::finalize() {
+  if (omp_in_parallel()) {
+    std::string msg("Kokkos::OpenMP::finalize ERROR ");
+    if (this != &singleton()) msg.append(": not initialized");
+    if (omp_in_parallel()) msg.append(": in parallel");
+    Kokkos::Impl::throw_runtime_exception(msg);
+  }
+
+  if (this == &singleton()) {
+    auto const &instance = singleton();
+    // Silence Cuda Warning
+    const int nthreads =
+        instance.m_pool_size <= Impl::g_openmp_hardware_max_threads
+            ? Impl::g_openmp_hardware_max_threads
+            : instance.m_pool_size;
+    (void)nthreads;
+
+#pragma omp parallel num_threads(nthreads)
+    {
+      Impl::t_openmp_hardware_id = 0;
+      Impl::SharedAllocationRecord<void, void>::tracking_disable();
+    }
+
+    // allow main thread to track
+    Impl::SharedAllocationRecord<void, void>::tracking_enable();
+
+    Impl::g_openmp_hardware_max_threads = 1;
+  }
+
+  m_initialized = false;
+
+  Kokkos::Profiling::finalize();
+}
+
+void OpenMPInternal::print_configuration(std::ostream &s) const {
+  s << "Kokkos::OpenMP";
+
+  if (m_initialized) {
+    const int numa_count      = 1;
+    const int core_per_numa   = Impl::g_openmp_hardware_max_threads;
+    const int thread_per_core = 1;
+
+    s << " thread_pool_topology[ " << numa_count << " x " << core_per_numa
+      << " x " << thread_per_core << " ]" << std::endl;
+  } else {
+    s << " not initialized" << std::endl;
+  }
+}
+
+bool OpenMPInternal::verify_is_initialized(const char *const label) const {
+  if (!m_initialized) {
+    std::cerr << "Kokkos::OpenMP " << label
+              << " : ERROR OpenMP is not initialized" << std::endl;
+  }
+  return m_initialized;
+}
+}  // namespace Impl
+
+//----------------------------------------------------------------------------
+
+OpenMP::OpenMP()
+#ifdef KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
+    : m_space_instance(&Impl::OpenMPInternal::singleton()) {
+}
+#else
+    : m_space_instance(&Impl::OpenMPInternal::singleton(),
+                       [](Impl::OpenMPInternal *) {}) {
+  Impl::OpenMPInternal::singleton().verify_is_initialized(
+      "OpenMP instance constructor");
+}
+#endif
+
+int OpenMP::impl_get_current_max_threads() noexcept {
+  return Impl::OpenMPInternal::get_current_max_threads();
+}
+
+void OpenMP::impl_initialize(InitializationSettings const &settings) {
+  Impl::OpenMPInternal::singleton().initialize(
+      settings.has_num_threads() ? settings.get_num_threads() : -1);
+}
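A sketch (not part of the patch) of how the num_threads mapping above is
reached from user code; set_num_threads is the public InitializationSettings
setter, and 8 is an assumed value:

    // thread_count == 8: the pool is forced to 8 threads, and initialize()
    // warns if that oversubscribes the available cores.
    Kokkos::initialize(Kokkos::InitializationSettings().set_num_threads(8));

    // Without num_threads: thread_count == -1, so the current OpenMP
    // maximum (g_openmp_hardware_max_threads) is kept.
    Kokkos::initialize();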
+
+void OpenMP::impl_finalize() { Impl::OpenMPInternal::singleton().finalize(); }
+
+void OpenMP::print_configuration(std::ostream &os, bool /*verbose*/) const {
+  os << "Host Parallel Execution Space:\n";
+  os << "  KOKKOS_ENABLE_OPENMP: yes\n";
+
+  os << "OpenMP Atomics:\n";
+  os << "  KOKKOS_ENABLE_OPENMP_ATOMICS: ";
+#ifdef KOKKOS_ENABLE_OPENMP_ATOMICS
+  os << "yes\n";
+#else
+  os << "no\n";
+#endif
+
+  os << "\nOpenMP Runtime Configuration:\n";
+
+  m_space_instance->print_configuration(os);
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+std::vector<OpenMP> OpenMP::partition(...) { return std::vector<OpenMP>(1); }
+
+OpenMP OpenMP::create_instance(...) { return OpenMP(); }
+#endif
+
+int OpenMP::concurrency() { return Impl::g_openmp_hardware_max_threads; }
+
+void OpenMP::fence(const std::string &name) const {
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::OpenMP>(
+      name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{1}, []() {});
+}
+
+namespace Impl {
+
+int g_openmp_space_factory_initialized =
+    initialize_space_factory<OpenMP>("050_OpenMP");
+
+}  // namespace Impl
+
+#ifdef KOKKOS_ENABLE_CXX14
+namespace Tools {
+namespace Experimental {
+constexpr DeviceType DeviceTypeTraits<OpenMP>::id;
+}
+}  // namespace Tools
+#endif
+
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Instance.hpp b/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Instance.hpp
new file mode 100644 (file)
index 0000000..1a2ee95
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMP_INSTANCE_HPP
+#define KOKKOS_OPENMP_INSTANCE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if !defined(_OPENMP) && !defined(__CUDA_ARCH__) && \
+    !defined(__HIP_DEVICE_COMPILE__) && !defined(__SYCL_DEVICE_ONLY__)
+#error \
+    "You enabled Kokkos OpenMP support without enabling OpenMP in the compiler!"
+#endif
+
+#include <Kokkos_OpenMP.hpp>
+
+#include <impl/Kokkos_Traits.hpp>
+#include <impl/Kokkos_HostThreadTeam.hpp>
+
+#include <Kokkos_Atomic.hpp>
+
+#include <Kokkos_UniqueToken.hpp>
+#include <impl/Kokkos_ConcurrentBitset.hpp>
+
+#include <omp.h>
+
+namespace Kokkos {
+namespace Impl {
+
+class OpenMPInternal;
+
+extern int g_openmp_hardware_max_threads;
+
+extern thread_local int t_openmp_hardware_id;
+// FIXME_OPENMP we can remove this after we remove partition_master
+extern thread_local OpenMPInternal* t_openmp_instance;
+
+struct OpenMPTraits {
+  static int constexpr MAX_THREAD_COUNT = 512;
+};
+
+class OpenMPInternal {
+ private:
+  OpenMPInternal(int arg_pool_size)
+      : m_pool_size{arg_pool_size}, m_level{omp_get_level()}, m_pool() {}
+
+  ~OpenMPInternal() { clear_thread_data(); }
+
+  static int get_current_max_threads() noexcept;
+
+  bool m_initialized = false;
+
+  int m_pool_size;
+  int m_level;
+
+  HostThreadTeamData* m_pool[OpenMPTraits::MAX_THREAD_COUNT];
+
+ public:
+  friend class Kokkos::OpenMP;
+
+  static OpenMPInternal& singleton();
+
+  void initialize(int thread_count);
+
+  void finalize();
+
+  void clear_thread_data();
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  KOKKOS_DEPRECATED static void validate_partition(const int nthreads,
+                                                   int& num_partitions,
+                                                   int& partition_size) {
+    validate_partition_impl(nthreads, num_partitions, partition_size);
+  }
+  static void validate_partition_impl(const int nthreads, int& num_partitions,
+                                      int& partition_size);
+#endif
+
+  void resize_thread_data(size_t pool_reduce_bytes, size_t team_reduce_bytes,
+                          size_t team_shared_bytes, size_t thread_local_bytes);
+
+  HostThreadTeamData* get_thread_data() const noexcept {
+    return m_pool[m_level == omp_get_level() ? 0 : omp_get_thread_num()];
+  }
+
+  HostThreadTeamData* get_thread_data(int i) const noexcept {
+    return m_pool[i];
+  }
+
+  bool is_initialized() const { return m_initialized; }
+
+  bool verify_is_initialized(const char* const label) const;
+
+  void print_configuration(std::ostream& s) const;
+};
+
+}  // namespace Impl
+inline bool OpenMP::impl_is_initialized() noexcept {
+  return Impl::OpenMPInternal::singleton().is_initialized();
+}
+
+inline bool OpenMP::in_parallel(OpenMP const&) noexcept {
+  // FIXME_OPENMP We are forced to use t_openmp_instance because the function is
+  // static and does not use the OpenMP object
+  return ((Impl::OpenMPInternal::singleton().m_level < omp_get_level()) &&
+          (!Impl::t_openmp_instance ||
+           Impl::t_openmp_instance->m_level < omp_get_level()));
+}
+
+inline int OpenMP::impl_thread_pool_size() noexcept {
+  // FIXME_OPENMP We are forced to use t_openmp_instance because the function is
+  // static
+  return OpenMP::in_parallel()
+             ? omp_get_num_threads()
+             : (Impl::t_openmp_instance
+                    ? Impl::t_openmp_instance->m_pool_size
+                    : Impl::OpenMPInternal::singleton().m_pool_size);
+}
+
+KOKKOS_INLINE_FUNCTION
+int OpenMP::impl_thread_pool_rank() noexcept {
+  // FIXME_OPENMP We are forced to use t_openmp_instance because the function is
+  // static
+  KOKKOS_IF_ON_HOST(
+      (return Impl::t_openmp_instance ? 0 : omp_get_thread_num();))
+
+  KOKKOS_IF_ON_DEVICE((return -1;))
+}
+
+inline void OpenMP::impl_static_fence(std::string const& name) {
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::OpenMP>(
+      name,
+      Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+          GlobalDeviceSynchronization,
+      []() {});
+}
+
+inline bool OpenMP::is_asynchronous(OpenMP const& /*instance*/) noexcept {
+  return false;
+}
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+template <typename F>
+KOKKOS_DEPRECATED void OpenMP::partition_master(F const& f, int num_partitions,
+                                                int partition_size) {
+#if _OPENMP >= 201511
+  if (omp_get_max_active_levels() > 1) {
+#else
+  if (omp_get_nested()) {
+#endif
+    using Exec = Impl::OpenMPInternal;
+
+    Exec* prev_instance = &Impl::OpenMPInternal::singleton();
+
+    Exec::validate_partition_impl(prev_instance->m_pool_size, num_partitions,
+                                  partition_size);
+
+    OpenMP::memory_space space;
+
+#pragma omp parallel num_threads(num_partitions)
+    {
+      Exec thread_local_instance(partition_size);
+      Impl::t_openmp_instance = &thread_local_instance;
+
+      size_t pool_reduce_bytes  = 32 * partition_size;
+      size_t team_reduce_bytes  = 32 * partition_size;
+      size_t team_shared_bytes  = 1024 * partition_size;
+      size_t thread_local_bytes = 1024;
+
+      thread_local_instance.resize_thread_data(
+          pool_reduce_bytes, team_reduce_bytes, team_shared_bytes,
+          thread_local_bytes);
+
+      omp_set_num_threads(partition_size);
+      f(omp_get_thread_num(), omp_get_num_threads());
+      Impl::t_openmp_instance = nullptr;
+    }
+  } else {
+    // nested openmp not enabled
+    f(0, 1);
+  }
+}
+#endif
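A usage sketch for the deprecated partition_master above (requires nested
OpenMP support; the partition counts are illustrative assumptions):

    // Split the pool into 2 partitions of 4 threads each. The callable
    // receives (partition id, number of partitions) on each partition master.
    Kokkos::OpenMP::partition_master(
        [](int partition_id, int num_partitions) {
          // work here runs with a 4-thread pool per partition
        },
        2, 4);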
+
+namespace Experimental {
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+template <>
+class MasterLock<OpenMP> {
+ public:
+  void lock() { omp_set_lock(&m_lock); }
+  void unlock() { omp_unset_lock(&m_lock); }
+  bool try_lock() { return static_cast<bool>(omp_test_lock(&m_lock)); }
+
+  KOKKOS_DEPRECATED MasterLock() { omp_init_lock(&m_lock); }
+  ~MasterLock() { omp_destroy_lock(&m_lock); }
+
+  MasterLock(MasterLock const&) = delete;
+  MasterLock(MasterLock&&)      = delete;
+  MasterLock& operator=(MasterLock const&) = delete;
+  MasterLock& operator=(MasterLock&&) = delete;
+
+ private:
+  omp_lock_t m_lock;
+};
+#endif
+
+template <>
+class UniqueToken<OpenMP, UniqueTokenScope::Instance> {
+ private:
+  using buffer_type = Kokkos::View<uint32_t*, Kokkos::HostSpace>;
+  int m_count;
+  buffer_type m_buffer_view;
+  uint32_t volatile* m_buffer;
+
+ public:
+  using execution_space = OpenMP;
+  using size_type       = int;
+
+  /// \brief create an object sized for the concurrency of the given instance
+  ///
+  /// This object should not be shared between instances
+  UniqueToken(execution_space const& = execution_space()) noexcept
+      : m_count(::Kokkos::OpenMP::impl_thread_pool_size()),
+        m_buffer_view(buffer_type()),
+        m_buffer(nullptr) {}
+
+  UniqueToken(size_type max_size, execution_space const& = execution_space())
+      : m_count(max_size),
+        m_buffer_view("UniqueToken::m_buffer_view",
+                      ::Kokkos::Impl::concurrent_bitset::buffer_bound(m_count)),
+        m_buffer(m_buffer_view.data()) {}
+
+  /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int size() const noexcept {
+    KOKKOS_IF_ON_HOST((return m_count;))
+
+    KOKKOS_IF_ON_DEVICE((return 0;))
+  }
+
+  /// \brief acquire value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int acquire() const noexcept {
+    KOKKOS_IF_ON_HOST(
+        (if (m_count >= ::Kokkos::OpenMP::impl_thread_pool_size()) return ::
+             Kokkos::OpenMP::impl_thread_pool_rank();
+         const ::Kokkos::pair<int, int> result =
+             ::Kokkos::Impl::concurrent_bitset::acquire_bounded(
+                 m_buffer, m_count, ::Kokkos::Impl::clock_tic() % m_count);
+
+         if (result.first < 0) {
+           ::Kokkos::abort(
+               "UniqueToken<OpenMP> failure to acquire tokens, no tokens "
+               "available");
+         }
+
+         return result.first;))
+
+    KOKKOS_IF_ON_DEVICE((return 0;))
+  }
+
+  /// \brief release a value acquired by generate
+  KOKKOS_INLINE_FUNCTION
+  void release(int i) const noexcept {
+    KOKKOS_IF_ON_HOST(
+        (if (m_count < ::Kokkos::OpenMP::impl_thread_pool_size()) {
+          ::Kokkos::Impl::concurrent_bitset::release(m_buffer, i);
+        }))
+
+    KOKKOS_IF_ON_DEVICE(((void)i;))
+  }
+};
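A minimal sketch (not part of the patch) of the intended use of this
instance-scoped UniqueToken; n is an assumed iteration count:

    Kokkos::Experimental::UniqueToken<Kokkos::OpenMP> token;
    Kokkos::parallel_for(
        Kokkos::RangePolicy<Kokkos::OpenMP>(0, n), KOKKOS_LAMBDA(int i) {
          const int id = token.acquire();  // 0 <= id < token.size()
          // ... index per-token scratch storage with id ...
          token.release(id);
        });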
+
+template <>
+class UniqueToken<OpenMP, UniqueTokenScope::Global> {
+ public:
+  using execution_space = OpenMP;
+  using size_type       = int;
+
+  /// \brief create an object sized for the concurrency of the given instance
+  ///
+  /// This object should not be shared between instances
+  UniqueToken(execution_space const& = execution_space()) noexcept {}
+
+  /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int size() const noexcept {
+    KOKKOS_IF_ON_HOST((return Kokkos::Impl::g_openmp_hardware_max_threads;))
+
+    KOKKOS_IF_ON_DEVICE((return 0;))
+  }
+
+  /// \brief acquire value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int acquire() const noexcept {
+    KOKKOS_IF_ON_HOST((return Kokkos::Impl::t_openmp_hardware_id;))
+
+    KOKKOS_IF_ON_DEVICE((return 0;))
+  }
+
+  /// \brief release a value acquired by generate
+  KOKKOS_INLINE_FUNCTION
+  void release(int) const noexcept {}
+};
+
+}  // namespace Experimental
+
+inline int OpenMP::impl_thread_pool_size(int depth) {
+  return depth < 2 ? impl_thread_pool_size() : 1;
+}
+
+KOKKOS_INLINE_FUNCTION
+int OpenMP::impl_hardware_thread_id() noexcept {
+  KOKKOS_IF_ON_HOST((return Impl::t_openmp_hardware_id;))
+
+  KOKKOS_IF_ON_DEVICE((return -1;))
+}
+
+inline int OpenMP::impl_max_hardware_threads() noexcept {
+  return Impl::g_openmp_hardware_max_threads;
+}
+
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Parallel.hpp b/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Parallel.hpp
new file mode 100644 (file)
index 0000000..94c465d
--- /dev/null
@@ -0,0 +1,1262 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMP_PARALLEL_HPP
+#define KOKKOS_OPENMP_PARALLEL_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_OPENMP)
+
+#include <omp.h>
+#include <OpenMP/Kokkos_OpenMP_Instance.hpp>
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#define KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+    defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#undef KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+#define KOKKOS_PRAGMA_IVDEP_IF_ENABLED _Pragma("ivdep")
+#endif
+
+#ifndef KOKKOS_COMPILER_NVHPC
+#define KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE , m_policy.chunk_size()
+#else
+#define KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>, Kokkos::OpenMP> {
+ private:
+  using Policy    = Kokkos::RangePolicy<Traits...>;
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+
+  OpenMPInternal* m_instance;
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  inline static void exec_range(const FunctorType& functor, const Member ibeg,
+                                const Member iend) {
+    KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+    for (auto iwork = ibeg; iwork < iend; ++iwork) {
+      exec_work(functor, iwork);
+    }
+  }
+
+  template <class Enable = WorkTag>
+  inline static std::enable_if_t<std::is_void<WorkTag>::value &&
+                                 std::is_same<Enable, WorkTag>::value>
+  exec_work(const FunctorType& functor, const Member iwork) {
+    functor(iwork);
+  }
+
+  template <class Enable = WorkTag>
+  inline static std::enable_if_t<!std::is_void<WorkTag>::value &&
+                                 std::is_same<Enable, WorkTag>::value>
+  exec_work(const FunctorType& functor, const Member iwork) {
+    functor(WorkTag{}, iwork);
+  }
+
+  template <class Policy>
+  std::enable_if_t<std::is_same<typename Policy::schedule_type::type,
+                                Kokkos::Dynamic>::value>
+  execute_parallel() const {
+    // prevent a bug in NVHPC 21.9/CUDA 11.4 (the loop is entered even for
+    // zero iterations)
+    if (m_policy.begin() >= m_policy.end()) return;
+#pragma omp parallel for schedule(dynamic KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE) \
+    num_threads(OpenMP::impl_thread_pool_size())
+    KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+    for (auto iwork = m_policy.begin(); iwork < m_policy.end(); ++iwork) {
+      exec_work(m_functor, iwork);
+    }
+  }
+
+  template <class Policy>
+  std::enable_if_t<!std::is_same<typename Policy::schedule_type::type,
+                                 Kokkos::Dynamic>::value>
+  execute_parallel() const {
+#pragma omp parallel for schedule(static KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE) \
+    num_threads(OpenMP::impl_thread_pool_size())
+    KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+    for (auto iwork = m_policy.begin(); iwork < m_policy.end(); ++iwork) {
+      exec_work(m_functor, iwork);
+    }
+  }
+
+ public:
+  inline void execute() const {
+    if (OpenMP::in_parallel()) {
+      exec_range(m_functor, m_policy.begin(), m_policy.end());
+      return;
+    }
+
+#ifndef KOKKOS_INTERNAL_DISABLE_NATIVE_OPENMP
+    execute_parallel<Policy>();
+#else
+    constexpr bool is_dynamic =
+        std::is_same<typename Policy::schedule_type::type,
+                     Kokkos::Dynamic>::value;
+#pragma omp parallel num_threads(OpenMP::impl_thread_pool_size())
+    {
+      HostThreadTeamData& data = *(m_instance->get_thread_data());
+
+      data.set_work_partition(m_policy.end() - m_policy.begin(),
+                              m_policy.chunk_size());
+
+      if (is_dynamic) {
+        // Make sure work partition is set before stealing
+        if (data.pool_rendezvous()) data.pool_rendezvous_release();
+      }
+
+      std::pair<int64_t, int64_t> range(0, 0);
+
+      do {
+        range = is_dynamic ? data.get_work_stealing_chunk()
+                           : data.get_work_partition();
+
+        exec_range(m_functor, range.first + m_policy.begin(),
+                   range.second + m_policy.begin());
+
+      } while (is_dynamic && 0 <= range.first);
+    }
+#endif
+  }
+
+  inline ParallelFor(const FunctorType& arg_functor, Policy arg_policy)
+      : m_instance(nullptr), m_functor(arg_functor), m_policy(arg_policy) {
+    if (t_openmp_instance) {
+      m_instance = t_openmp_instance;
+    } else {
+      m_instance = arg_policy.space().impl_internal_space_instance();
+    }
+  }
+};
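For context, a sketch of a dispatch that instantiates this ParallelFor
specialization (x, y, a, and n are assumed placeholders):

    Kokkos::parallel_for(
        "axpy", Kokkos::RangePolicy<Kokkos::OpenMP>(0, n),
        KOKKOS_LAMBDA(const int i) { y[i] += a * x[i]; });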
+
+// MDRangePolicy impl
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+                  Kokkos::OpenMP> {
+ private:
+  using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+  using Policy        = typename MDRangePolicy::impl_range_policy;
+  using WorkTag       = typename MDRangePolicy::work_tag;
+
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+
+  using iterate_type = typename Kokkos::Impl::HostIterateTile<
+      MDRangePolicy, FunctorType, typename MDRangePolicy::work_tag, void>;
+
+  OpenMPInternal* m_instance;
+  const FunctorType m_functor;
+  const MDRangePolicy m_mdr_policy;
+  const Policy m_policy;  // construct as RangePolicy( 0, num_tiles
+                          // ).set_chunk_size(1) in ctor
+
+  inline static void exec_range(const MDRangePolicy& mdr_policy,
+                                const FunctorType& functor, const Member ibeg,
+                                const Member iend) {
+    KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+    for (Member iwork = ibeg; iwork < iend; ++iwork) {
+      iterate_type(mdr_policy, functor)(iwork);
+    }
+  }
+
+  template <class Policy>
+  typename std::enable_if_t<std::is_same<typename Policy::schedule_type::type,
+                                         Kokkos::Dynamic>::value>
+  execute_parallel() const {
+#pragma omp parallel for schedule(dynamic KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE) \
+    num_threads(OpenMP::impl_thread_pool_size())
+    KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+    for (auto iwork = m_policy.begin(); iwork < m_policy.end(); ++iwork) {
+      iterate_type(m_mdr_policy, m_functor)(iwork);
+    }
+  }
+
+  template <class Policy>
+  typename std::enable_if<!std::is_same<typename Policy::schedule_type::type,
+                                        Kokkos::Dynamic>::value>::type
+  execute_parallel() const {
+#pragma omp parallel for schedule(static KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE) \
+    num_threads(OpenMP::impl_thread_pool_size())
+    KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+    for (auto iwork = m_policy.begin(); iwork < m_policy.end(); ++iwork) {
+      iterate_type(m_mdr_policy, m_functor)(iwork);
+    }
+  }
+
+ public:
+  inline void execute() const {
+    if (OpenMP::in_parallel()) {
+      ParallelFor::exec_range(m_mdr_policy, m_functor, m_policy.begin(),
+                              m_policy.end());
+      return;
+    }
+
+#ifndef KOKKOS_INTERNAL_DISABLE_NATIVE_OPENMP
+    execute_parallel<Policy>();
+#else
+    constexpr bool is_dynamic =
+        std::is_same<typename Policy::schedule_type::type,
+                     Kokkos::Dynamic>::value;
+
+#pragma omp parallel num_threads(OpenMP::impl_thread_pool_size())
+    {
+      HostThreadTeamData& data = *(m_instance->get_thread_data());
+
+      data.set_work_partition(m_policy.end() - m_policy.begin(),
+                              m_policy.chunk_size());
+
+      if (is_dynamic) {
+        // Make sure work partition is set before stealing
+        if (data.pool_rendezvous()) data.pool_rendezvous_release();
+      }
+
+      std::pair<int64_t, int64_t> range(0, 0);
+
+      do {
+        range = is_dynamic ? data.get_work_stealing_chunk()
+                           : data.get_work_partition();
+
+        ParallelFor::exec_range(m_mdr_policy, m_functor,
+                                range.first + m_policy.begin(),
+                                range.second + m_policy.begin());
+
+      } while (is_dynamic && 0 <= range.first);
+    }
+    // END #pragma omp parallel
+#endif
+  }
+
+  inline ParallelFor(const FunctorType& arg_functor, MDRangePolicy arg_policy)
+      : m_instance(nullptr),
+        m_functor(arg_functor),
+        m_mdr_policy(arg_policy),
+        m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)) {
+    if (t_openmp_instance) {
+      m_instance = t_openmp_instance;
+    } else {
+      m_instance = arg_policy.space().impl_internal_space_instance();
+    }
+  }
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy&, const Functor&) {
+    /**
+     * 1024 here is just our guess for a reasonable max tile size,
+     * it isn't a hardware constraint. If people see a use for larger
+     * tile size products, we're happy to change this.
+     */
+    return 1024;
+  }
+};
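Likewise, a sketch of a dispatch reaching the MDRangePolicy specialization
above; the 2-D tiles are iterated through the flattened RangePolicy member
(A, m, and n are assumed placeholders):

    Kokkos::parallel_for(
        "init",
        Kokkos::MDRangePolicy<Kokkos::OpenMP, Kokkos::Rank<2>>({0, 0}, {m, n}),
        KOKKOS_LAMBDA(const int i, const int j) { A(i, j) = 0.0; });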
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
+                     Kokkos::OpenMP> {
+ private:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
+                         void>;
+
+  // WorkTag is expected to be void if ReducerType is not InvalidType
+  // (could be statically asserted)
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  OpenMPInternal* m_instance;
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const FunctorType& functor, const Member ibeg, const Member iend,
+      reference_type update) {
+    for (Member iwork = ibeg; iwork < iend; ++iwork) {
+      functor(iwork, update);
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const FunctorType& functor, const Member ibeg, const Member iend,
+      reference_type update) {
+    const TagType t{};
+    for (Member iwork = ibeg; iwork < iend; ++iwork) {
+      functor(t, iwork, update);
+    }
+  }
+
+ public:
+  inline void execute() const {
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    if (m_policy.end() <= m_policy.begin()) {
+      if (m_result_ptr) {
+        final_reducer.init(m_result_ptr);
+        final_reducer.final(m_result_ptr);
+      }
+      return;
+    }
+    enum {
+      is_dynamic = std::is_same<typename Policy::schedule_type::type,
+                                Kokkos::Dynamic>::value
+    };
+
+    const size_t pool_reduce_bytes =
+        Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
+
+    m_instance->resize_thread_data(pool_reduce_bytes,
+                                   0,  // team_reduce_bytes
+                                   0,  // team_shared_bytes
+                                   0   // thread_local_bytes
+    );
+
+    const int pool_size = OpenMP::impl_thread_pool_size();
+#pragma omp parallel num_threads(pool_size)
+    {
+      HostThreadTeamData& data = *(m_instance->get_thread_data());
+
+      data.set_work_partition(m_policy.end() - m_policy.begin(),
+                              m_policy.chunk_size());
+
+      if (is_dynamic) {
+        // Make sure work partition is set before stealing
+        if (data.pool_rendezvous()) data.pool_rendezvous_release();
+      }
+
+      reference_type update = final_reducer.init(
+          reinterpret_cast<pointer_type>(data.pool_reduce_local()));
+
+      std::pair<int64_t, int64_t> range(0, 0);
+
+      do {
+        range = is_dynamic ? data.get_work_stealing_chunk()
+                           : data.get_work_partition();
+
+        ParallelReduce::template exec_range<WorkTag>(
+            m_functor, range.first + m_policy.begin(),
+            range.second + m_policy.begin(), update);
+
+      } while (is_dynamic && 0 <= range.first);
+    }
+
+    // Reduction:
+
+    const pointer_type ptr =
+        pointer_type(m_instance->get_thread_data(0)->pool_reduce_local());
+
+    for (int i = 1; i < pool_size; ++i) {
+      final_reducer.join(
+          ptr, reinterpret_cast<pointer_type>(
+                   m_instance->get_thread_data(i)->pool_reduce_local()));
+    }
+
+    final_reducer.final(ptr);
+
+    if (m_result_ptr) {
+      const int n = Analysis::value_count(
+          ReducerConditional::select(m_functor, m_reducer));
+
+      for (int j = 0; j < n; ++j) {
+        m_result_ptr[j] = ptr[j];
+      }
+    }
+  }
+
+  //----------------------------------------
+
+  template <class ViewType>
+  inline ParallelReduce(
+      const FunctorType& arg_functor, Policy arg_policy,
+      const ViewType& arg_view,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value &&
+                           !Kokkos::is_reducer<ReducerType>::value,
+                       void*> = nullptr)
+      : m_instance(nullptr),
+        m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_view.data()) {
+    if (t_openmp_instance) {
+      m_instance = t_openmp_instance;
+    } else {
+      m_instance = arg_policy.space().impl_internal_space_instance();
+    }
+    /*static_assert( std::is_same< typename ViewType::memory_space
+                                    , Kokkos::HostSpace >::value
+      , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
+      );*/
+  }
+
+  inline ParallelReduce(const FunctorType& arg_functor, Policy arg_policy,
+                        const ReducerType& reducer)
+      : m_instance(nullptr),
+        m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()) {
+    if (t_openmp_instance) {
+      m_instance = t_openmp_instance;
+    } else {
+      m_instance = arg_policy.space().impl_internal_space_instance();
+    }
+    /*static_assert( std::is_same< typename ViewType::memory_space
+                                    , Kokkos::HostSpace >::value
+      , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
+      );*/
+  }
+};
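+
+// Illustrative usage sketch (not part of the upstream Kokkos sources; the
+// names 'a' and 'n' are placeholders assumed for illustration). A host-side
+// caller reaches the RangePolicy specialization above via, e.g.:
+//
+//   double sum = 0.0;
+//   Kokkos::parallel_reduce(
+//       "sum_a", Kokkos::RangePolicy<Kokkos::OpenMP>(0, n),
+//       KOKKOS_LAMBDA(const int i, double& update) { update += a(i); },
+//       sum);
+//
+// Each pool thread accumulates into its pool_reduce_local() buffer; the
+// partials are then joined serially after the parallel region, as in
+// execute() above.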
+
+// MDRangePolicy impl
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
+                     Kokkos::OpenMP> {
+ private:
+  using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+  using Policy        = typename MDRangePolicy::impl_range_policy;
+
+  using WorkTag   = typename MDRangePolicy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
+                         void>;
+
+  using Analysis = FunctorAnalysis<FunctorPatternInterface::REDUCE,
+                                   MDRangePolicy, ReducerTypeFwd>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using value_type     = typename Analysis::value_type;
+  using reference_type = typename Analysis::reference_type;
+
+  using iterate_type =
+      typename Kokkos::Impl::HostIterateTile<MDRangePolicy, FunctorType,
+                                             WorkTag, reference_type>;
+
+  OpenMPInternal* m_instance;
+  const FunctorType m_functor;
+  const MDRangePolicy m_mdr_policy;
+  const Policy m_policy;  // constructed as
+                          // RangePolicy(0, num_tiles).set_chunk_size(1) in ctor
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+
+  inline static void exec_range(const MDRangePolicy& mdr_policy,
+                                const FunctorType& functor, const Member ibeg,
+                                const Member iend, reference_type update) {
+    for (Member iwork = ibeg; iwork < iend; ++iwork) {
+      iterate_type(mdr_policy, functor, update)(iwork);
+    }
+  }
+
+ public:
+  inline void execute() const {
+    enum {
+      is_dynamic = std::is_same<typename Policy::schedule_type::type,
+                                Kokkos::Dynamic>::value
+    };
+
+    const size_t pool_reduce_bytes =
+        Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
+
+    m_instance->resize_thread_data(pool_reduce_bytes,
+                                   0,  // team_reduce_bytes
+                                   0,  // team_shared_bytes
+                                   0); // thread_local_bytes
+
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    const int pool_size = OpenMP::impl_thread_pool_size();
+#pragma omp parallel num_threads(pool_size)
+    {
+      HostThreadTeamData& data = *(m_instance->get_thread_data());
+
+      data.set_work_partition(m_policy.end() - m_policy.begin(),
+                              m_policy.chunk_size());
+
+      if (is_dynamic) {
+        // Make sure work partition is set before stealing
+        if (data.pool_rendezvous()) data.pool_rendezvous_release();
+      }
+
+      reference_type update = final_reducer.init(
+          reinterpret_cast<pointer_type>(data.pool_reduce_local()));
+
+      std::pair<int64_t, int64_t> range(0, 0);
+
+      do {
+        range = is_dynamic ? data.get_work_stealing_chunk()
+                           : data.get_work_partition();
+
+        ParallelReduce::exec_range(m_mdr_policy, m_functor,
+                                   range.first + m_policy.begin(),
+                                   range.second + m_policy.begin(), update);
+
+      } while (is_dynamic && 0 <= range.first);
+    }
+    // END #pragma omp parallel
+
+    // Reduction:
+
+    const pointer_type ptr =
+        pointer_type(m_instance->get_thread_data(0)->pool_reduce_local());
+
+    for (int i = 1; i < pool_size; ++i) {
+      final_reducer.join(
+          ptr, reinterpret_cast<pointer_type>(
+                   m_instance->get_thread_data(i)->pool_reduce_local()));
+    }
+
+    final_reducer.final(ptr);
+
+    if (m_result_ptr) {
+      const int n = Analysis::value_count(
+          ReducerConditional::select(m_functor, m_reducer));
+
+      for (int j = 0; j < n; ++j) {
+        m_result_ptr[j] = ptr[j];
+      }
+    }
+  }
+
+  //----------------------------------------
+
+  template <class ViewType>
+  inline ParallelReduce(
+      const FunctorType& arg_functor, MDRangePolicy arg_policy,
+      const ViewType& arg_view,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value &&
+                           !Kokkos::is_reducer<ReducerType>::value,
+                       void*> = nullptr)
+      : m_instance(nullptr),
+        m_functor(arg_functor),
+        m_mdr_policy(arg_policy),
+        m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_view.data()) {
+    if (t_openmp_instance) {
+      m_instance = t_openmp_instance;
+    } else {
+      m_instance = arg_policy.space().impl_internal_space_instance();
+    }
+    /*static_assert( std::is_same< typename ViewType::memory_space
+                                    , Kokkos::HostSpace >::value
+      , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
+      );*/
+  }
+
+  inline ParallelReduce(const FunctorType& arg_functor,
+                        MDRangePolicy arg_policy, const ReducerType& reducer)
+      : m_instance(nullptr),
+        m_functor(arg_functor),
+        m_mdr_policy(arg_policy),
+        m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()) {
+    if (t_openmp_instance) {
+      m_instance = t_openmp_instance;
+    } else {
+      m_instance = arg_policy.space().impl_internal_space_instance();
+    }
+    /*static_assert( std::is_same< typename ViewType::memory_space
+                                    , Kokkos::HostSpace >::value
+      , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
+      );*/
+  }
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy&, const Functor&) {
+    /**
+     * 1024 here is just our guess for a reasonable max tile size;
+     * it isn't a hardware constraint. If people see a use for larger
+     * tile size products, we're happy to change this.
+     */
+    return 1024;
+  }
+};
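+
+// Illustrative usage sketch of the MDRangePolicy specialization above (names
+// 'A', 'm', 'n' assumed for illustration; not part of the upstream sources):
+//
+//   double sum = 0.0;
+//   Kokkos::parallel_reduce(
+//       Kokkos::MDRangePolicy<Kokkos::OpenMP, Kokkos::Rank<2>>({0, 0},
+//                                                              {m, n}),
+//       KOKKOS_LAMBDA(const int i, const int j, double& update) {
+//         update += A(i, j);
+//       },
+//       sum);
+//
+// The tile space is flattened into RangePolicy(0, num_tiles) and each tile
+// is expanded per thread by HostIterateTile in exec_range().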
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+                   Kokkos::OpenMP> {
+ private:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
+
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  OpenMPInternal* m_instance;
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const FunctorType& functor, const Member ibeg, const Member iend,
+      reference_type update, const bool final) {
+    for (Member iwork = ibeg; iwork < iend; ++iwork) {
+      functor(iwork, update, final);
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const FunctorType& functor, const Member ibeg, const Member iend,
+      reference_type update, const bool final) {
+    const TagType t{};
+    for (Member iwork = ibeg; iwork < iend; ++iwork) {
+      functor(t, iwork, update, final);
+    }
+  }
+
+ public:
+  inline void execute() const {
+    const int value_count          = Analysis::value_count(m_functor);
+    const size_t pool_reduce_bytes = 2 * Analysis::value_size(m_functor);
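+    // The factor of two above reserves two value slots per thread in the
+    // pool reduce buffer: [0, value_count) holds the thread's local scan
+    // contribution, and [value_count, 2 * value_count) receives the
+    // exclusive prefix ("base") filled in at the pool rendezvous below.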
+
+    m_instance->resize_thread_data(pool_reduce_bytes,
+                                   0,  // team_reduce_bytes
+                                   0,  // team_shared_bytes
+                                   0); // thread_local_bytes
+
+#pragma omp parallel num_threads(OpenMP::impl_thread_pool_size())
+    {
+      HostThreadTeamData& data = *(m_instance->get_thread_data());
+      typename Analysis::Reducer final_reducer(&m_functor);
+
+      const WorkRange range(m_policy, omp_get_thread_num(),
+                            omp_get_num_threads());
+
+      reference_type update_sum = final_reducer.init(
+          reinterpret_cast<pointer_type>(data.pool_reduce_local()));
+
+      ParallelScan::template exec_range<WorkTag>(
+          m_functor, range.begin(), range.end(), update_sum, false);
+
+      if (data.pool_rendezvous()) {
+        pointer_type ptr_prev = nullptr;
+
+        const int n = omp_get_num_threads();
+
+        for (int i = 0; i < n; ++i) {
+          pointer_type ptr =
+              (pointer_type)data.pool_member(i)->pool_reduce_local();
+
+          if (i) {
+            for (int j = 0; j < value_count; ++j) {
+              ptr[j + value_count] = ptr_prev[j + value_count];
+            }
+            final_reducer.join(ptr + value_count, ptr_prev);
+          } else {
+            final_reducer.init(ptr + value_count);
+          }
+
+          ptr_prev = ptr;
+        }
+
+        data.pool_rendezvous_release();
+      }
+
+      reference_type update_base = final_reducer.reference(
+          reinterpret_cast<pointer_type>(data.pool_reduce_local()) +
+          value_count);
+
+      ParallelScan::template exec_range<WorkTag>(
+          m_functor, range.begin(), range.end(), update_base, true);
+    }
+  }
+
+  //----------------------------------------
+
+  inline ParallelScan(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_instance(nullptr), m_functor(arg_functor), m_policy(arg_policy) {
+    if (t_openmp_instance) {
+      m_instance = t_openmp_instance;
+    } else {
+      m_instance = arg_policy.space().impl_internal_space_instance();
+    }
+  }
+
+  //----------------------------------------
+};
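+
+// Illustrative usage sketch of the scan entry point served by this
+// specialization (Views 'in' and 'out' of extent 'n' are assumed for
+// illustration; not part of the upstream sources):
+//
+//   Kokkos::parallel_scan(
+//       Kokkos::RangePolicy<Kokkos::OpenMP>(0, n),
+//       KOKKOS_LAMBDA(const int i, double& update, const bool final) {
+//         if (final) out(i) = update;  // exclusive prefix sum of 'in'
+//         update += in(i);
+//       });
+//
+// execute() runs the functor twice: once with final == false to form each
+// thread's local sum, and once with final == true after the rendezvous has
+// converted the local sums into per-thread exclusive prefixes.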
+
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+                            ReturnType, Kokkos::OpenMP> {
+ private:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
+
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  OpenMPInternal* m_instance;
+  const FunctorType m_functor;
+  const Policy m_policy;
+  ReturnType& m_returnvalue;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const FunctorType& functor, const Member ibeg, const Member iend,
+      reference_type update, const bool final) {
+    for (Member iwork = ibeg; iwork < iend; ++iwork) {
+      functor(iwork, update, final);
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const FunctorType& functor, const Member ibeg, const Member iend,
+      reference_type update, const bool final) {
+    const TagType t{};
+    for (Member iwork = ibeg; iwork < iend; ++iwork) {
+      functor(t, iwork, update, final);
+    }
+  }
+
+ public:
+  inline void execute() const {
+    const int value_count          = Analysis::value_count(m_functor);
+    const size_t pool_reduce_bytes = 2 * Analysis::value_size(m_functor);
+
+    m_instance->resize_thread_data(pool_reduce_bytes,
+                                   0,  // team_reduce_bytes
+                                   0,  // team_shared_bytes
+                                   0); // thread_local_bytes
+
+#pragma omp parallel num_threads(OpenMP::impl_thread_pool_size())
+    {
+      HostThreadTeamData& data = *(m_instance->get_thread_data());
+      typename Analysis::Reducer final_reducer(&m_functor);
+
+      const WorkRange range(m_policy, omp_get_thread_num(),
+                            omp_get_num_threads());
+      reference_type update_sum = final_reducer.init(
+          reinterpret_cast<pointer_type>(data.pool_reduce_local()));
+
+      ParallelScanWithTotal::template exec_range<WorkTag>(
+          m_functor, range.begin(), range.end(), update_sum, false);
+
+      if (data.pool_rendezvous()) {
+        pointer_type ptr_prev = nullptr;
+
+        const int n = omp_get_num_threads();
+
+        for (int i = 0; i < n; ++i) {
+          pointer_type ptr =
+              (pointer_type)data.pool_member(i)->pool_reduce_local();
+
+          if (i) {
+            for (int j = 0; j < value_count; ++j) {
+              ptr[j + value_count] = ptr_prev[j + value_count];
+            }
+            final_reducer.join(ptr + value_count, ptr_prev);
+          } else {
+            final_reducer.init(ptr + value_count);
+          }
+
+          ptr_prev = ptr;
+        }
+
+        data.pool_rendezvous_release();
+      }
+
+      reference_type update_base = final_reducer.reference(
+          reinterpret_cast<pointer_type>(data.pool_reduce_local()) +
+          value_count);
+
+      ParallelScanWithTotal::template exec_range<WorkTag>(
+          m_functor, range.begin(), range.end(), update_base, true);
+
+      if (omp_get_thread_num() == omp_get_num_threads() - 1) {
+        m_returnvalue = update_base;
+      }
+    }
+  }
+
+  //----------------------------------------
+
+  inline ParallelScanWithTotal(const FunctorType& arg_functor,
+                               const Policy& arg_policy,
+                               ReturnType& arg_returnvalue)
+      : m_instance(nullptr),
+        m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_returnvalue(arg_returnvalue) {
+    if (t_openmp_instance) {
+      m_instance = t_openmp_instance;
+    } else {
+      m_instance = arg_policy.space().impl_internal_space_instance();
+    }
+  }
+
+  //----------------------------------------
+};
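+
+// Illustrative usage sketch (same assumed names as the sketch above; not part
+// of the upstream sources): the identical scan, but returning the grand total
+// through a trailing argument, which is what dispatches to
+// ParallelScanWithTotal:
+//
+//   double total = 0.0;
+//   Kokkos::parallel_scan(
+//       Kokkos::RangePolicy<Kokkos::OpenMP>(0, n),
+//       KOKKOS_LAMBDA(const int i, double& update, const bool final) {
+//         if (final) out(i) = update;
+//         update += in(i);
+//       },
+//       total);
+//
+// The last thread in the pool holds the full prefix after the final pass and
+// stores it to m_returnvalue in execute() above.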
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                  Kokkos::OpenMP> {
+ private:
+  enum { TEAM_REDUCE_SIZE = 512 };
+
+  using Policy =
+      Kokkos::Impl::TeamPolicyInternal<Kokkos::OpenMP, Properties...>;
+  using WorkTag  = typename Policy::work_tag;
+  using SchedTag = typename Policy::schedule_type::type;
+  using Member   = typename Policy::member_type;
+
+  OpenMPInternal* m_instance;
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const size_t m_shmem_size;
+
+  template <class TagType>
+  inline static std::enable_if_t<(std::is_void<TagType>::value)> exec_team(
+      const FunctorType& functor, HostThreadTeamData& data,
+      const int league_rank_begin, const int league_rank_end,
+      const int league_size) {
+    for (int r = league_rank_begin; r < league_rank_end;) {
+      functor(Member(data, r, league_size));
+
+      if (++r < league_rank_end) {
+        // Don't allow team members to lap one another
+        // so that they don't overwrite shared memory.
+        if (data.team_rendezvous()) {
+          data.team_rendezvous_release();
+        }
+      }
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<(!std::is_void<TagType>::value)> exec_team(
+      const FunctorType& functor, HostThreadTeamData& data,
+      const int league_rank_begin, const int league_rank_end,
+      const int league_size) {
+    const TagType t{};
+
+    for (int r = league_rank_begin; r < league_rank_end;) {
+      functor(t, Member(data, r, league_size));
+
+      if (++r < league_rank_end) {
+        // Don't allow team members to lap one another
+        // so that they don't overwrite shared memory.
+        if (data.team_rendezvous()) {
+          data.team_rendezvous_release();
+        }
+      }
+    }
+  }
+
+ public:
+  inline void execute() const {
+    enum { is_dynamic = std::is_same<SchedTag, Kokkos::Dynamic>::value };
+
+    const size_t pool_reduce_size  = 0;  // Never shrinks
+    const size_t team_reduce_size  = TEAM_REDUCE_SIZE * m_policy.team_size();
+    const size_t team_shared_size  = m_shmem_size;
+    const size_t thread_local_size = 0;  // Never shrinks
+
+    m_instance->resize_thread_data(pool_reduce_size, team_reduce_size,
+                                   team_shared_size, thread_local_size);
+
+#pragma omp parallel num_threads(OpenMP::impl_thread_pool_size())
+    {
+      HostThreadTeamData& data = *(m_instance->get_thread_data());
+
+      const int active = data.organize_team(m_policy.team_size());
+
+      if (active) {
+        data.set_work_partition(
+            m_policy.league_size(),
+            (0 < m_policy.chunk_size() ? m_policy.chunk_size()
+                                       : m_policy.team_iter()));
+      }
+
+      if (is_dynamic) {
+        // Must synchronize to make sure each team has set its
+        // partition before beginning the work stealing loop.
+        if (data.pool_rendezvous()) data.pool_rendezvous_release();
+      }
+
+      if (active) {
+        std::pair<int64_t, int64_t> range(0, 0);
+
+        do {
+          range = is_dynamic ? data.get_work_stealing_chunk()
+                             : data.get_work_partition();
+
+          ParallelFor::template exec_team<WorkTag>(m_functor, data, range.first,
+                                                   range.second,
+                                                   m_policy.league_size());
+
+        } while (is_dynamic && 0 <= range.first);
+      }
+
+      data.disband_team();
+    }
+  }
+
+  inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_instance(nullptr),
+        m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+                     FunctorTeamShmemSize<FunctorType>::value(
+                         arg_functor, arg_policy.team_size())) {
+    if (t_openmp_instance) {
+      m_instance = t_openmp_instance;
+    } else {
+      m_instance = arg_policy.space().impl_internal_space_instance();
+    }
+  }
+};
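+
+// Illustrative usage sketch of this team-parallel specialization (league
+// size 'nteams' assumed for illustration; not part of the upstream sources):
+//
+//   using team_policy = Kokkos::TeamPolicy<Kokkos::OpenMP>;
+//   Kokkos::parallel_for(
+//       team_policy(nteams, Kokkos::AUTO),
+//       KOKKOS_LAMBDA(const team_policy::member_type& team) {
+//         // team.league_rank(), team.team_rank(), nested team-level
+//         // parallel_for, team shared scratch, ...
+//       });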
+
+//----------------------------------------------------------------------------
+
+template <class FunctorType, class ReducerType, class... Properties>
+class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                     ReducerType, Kokkos::OpenMP> {
+ private:
+  enum { TEAM_REDUCE_SIZE = 512 };
+
+  using Policy =
+      Kokkos::Impl::TeamPolicyInternal<Kokkos::OpenMP, Properties...>;
+
+  using WorkTag  = typename Policy::work_tag;
+  using SchedTag = typename Policy::schedule_type::type;
+  using Member   = typename Policy::member_type;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
+                         void>;
+
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  OpenMPInternal* m_instance;
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  const int m_shmem_size;
+
+  template <class TagType>
+  inline static std::enable_if_t<(std::is_void<TagType>::value)> exec_team(
+      const FunctorType& functor, HostThreadTeamData& data,
+      reference_type& update, const int league_rank_begin,
+      const int league_rank_end, const int league_size) {
+    for (int r = league_rank_begin; r < league_rank_end;) {
+      functor(Member(data, r, league_size), update);
+
+      if (++r < league_rank_end) {
+        // Don't allow team members to lap one another
+        // so that they don't overwrite shared memory.
+        if (data.team_rendezvous()) {
+          data.team_rendezvous_release();
+        }
+      }
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<(!std::is_void<TagType>::value)> exec_team(
+      const FunctorType& functor, HostThreadTeamData& data,
+      reference_type& update, const int league_rank_begin,
+      const int league_rank_end, const int league_size) {
+    const TagType t{};
+
+    for (int r = league_rank_begin; r < league_rank_end;) {
+      functor(t, Member(data, r, league_size), update);
+
+      if (++r < league_rank_end) {
+        // Don't allow team members to lap one another
+        // so that they don't overwrite shared memory.
+        if (data.team_rendezvous()) {
+          data.team_rendezvous_release();
+        }
+      }
+    }
+  }
+
+ public:
+  inline void execute() const {
+    enum { is_dynamic = std::is_same<SchedTag, Kokkos::Dynamic>::value };
+
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    if (m_policy.league_size() == 0 || m_policy.team_size() == 0) {
+      if (m_result_ptr) {
+        final_reducer.init(m_result_ptr);
+        final_reducer.final(m_result_ptr);
+      }
+      return;
+    }
+
+    const size_t pool_reduce_size =
+        Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
+
+    const size_t team_reduce_size  = TEAM_REDUCE_SIZE * m_policy.team_size();
+    const size_t team_shared_size  = m_shmem_size + m_policy.scratch_size(1);
+    const size_t thread_local_size = 0;  // Never shrinks
+
+    m_instance->resize_thread_data(pool_reduce_size, team_reduce_size,
+                                   team_shared_size, thread_local_size);
+
+    const int pool_size = OpenMP::impl_thread_pool_size();
+#pragma omp parallel num_threads(pool_size)
+    {
+      HostThreadTeamData& data = *(m_instance->get_thread_data());
+
+      const int active = data.organize_team(m_policy.team_size());
+
+      if (active) {
+        data.set_work_partition(
+            m_policy.league_size(),
+            (0 < m_policy.chunk_size() ? m_policy.chunk_size()
+                                       : m_policy.team_iter()));
+      }
+
+      if (is_dynamic) {
+        // Must synchronize to make sure each team has set its
+        // partition before beginning the work stealing loop.
+        if (data.pool_rendezvous()) data.pool_rendezvous_release();
+      }
+
+      if (active) {
+        reference_type update = final_reducer.init(
+            reinterpret_cast<pointer_type>(data.pool_reduce_local()));
+
+        std::pair<int64_t, int64_t> range(0, 0);
+
+        do {
+          range = is_dynamic ? data.get_work_stealing_chunk()
+                             : data.get_work_partition();
+
+          ParallelReduce::template exec_team<WorkTag>(m_functor, data, update,
+                                                      range.first, range.second,
+                                                      m_policy.league_size());
+
+        } while (is_dynamic && 0 <= range.first);
+      } else {
+        final_reducer.init(
+            reinterpret_cast<pointer_type>(data.pool_reduce_local()));
+      }
+
+      data.disband_team();
+
+      //  This thread has updated 'pool_reduce_local()' with its
+      //  contributions to the reduction.  The parallel region is
+      //  about to terminate and the master thread will load and
+      //  reduce each 'pool_reduce_local()' contribution.
+      //  Must 'memory_fence()' to guarantee that storing the update to
+      //  'pool_reduce_local()' will complete before this thread
+      //  exits the parallel region.
+
+      memory_fence();
+    }
+
+    // Reduction:
+
+    const pointer_type ptr =
+        pointer_type(m_instance->get_thread_data(0)->pool_reduce_local());
+
+    for (int i = 1; i < pool_size; ++i) {
+      final_reducer.join(
+          ptr, reinterpret_cast<pointer_type>(
+                   m_instance->get_thread_data(i)->pool_reduce_local()));
+    }
+
+    final_reducer.final(ptr);
+
+    if (m_result_ptr) {
+      const int n = Analysis::value_count(
+          ReducerConditional::select(m_functor, m_reducer));
+
+      for (int j = 0; j < n; ++j) {
+        m_result_ptr[j] = ptr[j];
+      }
+    }
+  }
+
+  //----------------------------------------
+
+  template <class ViewType>
+  inline ParallelReduce(
+      const FunctorType& arg_functor, const Policy& arg_policy,
+      const ViewType& arg_result,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value &&
+                           !Kokkos::is_reducer<ReducerType>::value,
+                       void*> = nullptr)
+      : m_instance(nullptr),
+        m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result.data()),
+        m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+                     FunctorTeamShmemSize<FunctorType>::value(
+                         arg_functor, arg_policy.team_size())) {
+    if (t_openmp_instance) {
+      m_instance = t_openmp_instance;
+    } else {
+      m_instance = arg_policy.space().impl_internal_space_instance();
+    }
+  }
+
+  inline ParallelReduce(const FunctorType& arg_functor, Policy arg_policy,
+                        const ReducerType& reducer)
+      : m_instance(nullptr),
+        m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+                     FunctorTeamShmemSize<FunctorType>::value(
+                         arg_functor, arg_policy.team_size())) {
+    if (t_openmp_instance) {
+      m_instance = t_openmp_instance;
+    } else {
+      m_instance = arg_policy.space().impl_internal_space_instance();
+    }
+    /*static_assert( std::is_same< typename ViewType::memory_space
+                            , Kokkos::HostSpace >::value
+    , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
+    );*/
+  }
+};
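+
+// Illustrative usage sketch of the reducer-based constructor above (names
+// assumed for illustration; not part of the upstream sources):
+//
+//   using team_policy = Kokkos::TeamPolicy<Kokkos::OpenMP>;
+//   double max_rank = 0.0;
+//   Kokkos::parallel_reduce(
+//       team_policy(nteams, Kokkos::AUTO),
+//       KOKKOS_LAMBDA(const team_policy::member_type& team, double& update) {
+//         if (team.league_rank() > update) update = team.league_rank();
+//       },
+//       Kokkos::Max<double>(max_rank));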
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#undef KOKKOS_PRAGMA_IVDEP_IF_ENABLED
+#undef KOKKOS_OPENMP_OPTIONAL_CHUNK_SIZE
+
+#endif
+#endif /* KOKKOS_OPENMP_PARALLEL_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Task.cpp b/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Task.cpp
new file mode 100644 (file)
index 0000000..4babcf0
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_OPENMP) && defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core.hpp>
+
+#include <impl/Kokkos_TaskQueue_impl.hpp>
+#include <impl/Kokkos_HostThreadTeam.hpp>
+#include <OpenMP/Kokkos_OpenMP_Task.hpp>
+#include <cassert>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template class TaskQueue<Kokkos::OpenMP, typename Kokkos::OpenMP::memory_space>;
+
+HostThreadTeamData& HostThreadTeamDataSingleton::singleton() {
+  static HostThreadTeamDataSingleton s;
+  return s;
+}
+
+HostThreadTeamDataSingleton::HostThreadTeamDataSingleton()
+    : HostThreadTeamData() {
+  Kokkos::OpenMP::memory_space space;
+  const size_t num_pool_reduce_bytes  = 32;
+  const size_t num_team_reduce_bytes  = 32;
+  const size_t num_team_shared_bytes  = 1024;
+  const size_t num_thread_local_bytes = 1024;
+  const size_t alloc_bytes            = HostThreadTeamData::scratch_size(
+      num_pool_reduce_bytes, num_team_reduce_bytes, num_team_shared_bytes,
+      num_thread_local_bytes);
+
+  void* ptr = nullptr;
+  try {
+    ptr = space.allocate(alloc_bytes);
+  } catch (Kokkos::Experimental::RawMemoryAllocationFailure const& f) {
+    // For now, just rethrow the error message with a note
+    // Note that this could, in turn, trigger an out of memory exception,
+    // but it's pretty unlikely, so we won't worry about it for now.
+    // TODO reasonable error message when `std::string` causes OOM error
+    Kokkos::Impl::throw_runtime_exception(
+        std::string("Failure to allocate scratch memory:  ") +
+        f.get_error_message());
+  }
+
+  HostThreadTeamData::scratch_assign(
+      ptr, alloc_bytes, num_pool_reduce_bytes, num_team_reduce_bytes,
+      num_team_shared_bytes, num_thread_local_bytes);
+}
+
+HostThreadTeamDataSingleton::~HostThreadTeamDataSingleton() {
+  Kokkos::OpenMP::memory_space space;
+  space.deallocate(HostThreadTeamData::scratch_buffer(),
+                   static_cast<size_t>(HostThreadTeamData::scratch_bytes()));
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+#else
+void KOKKOS_CORE_SRC_OPENMP_KOKKOS_OPENMP_TASK_PREVENT_LINK_ERROR() {}
+#endif /* #if defined( KOKKOS_ENABLE_OPENMP ) && defined( \
+          KOKKOS_ENABLE_TASKDAG ) */
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Task.hpp b/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Task.hpp
new file mode 100644 (file)
index 0000000..ec1ede0
--- /dev/null
@@ -0,0 +1,384 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_OPENMP_TASK_HPP
+#define KOKKOS_IMPL_OPENMP_TASK_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_OPENMP) && defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_TaskScheduler_fwd.hpp>
+
+#include <impl/Kokkos_HostThreadTeam.hpp>
+#include <Kokkos_OpenMP.hpp>
+
+#include <type_traits>
+#include <cassert>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+class HostThreadTeamDataSingleton : private HostThreadTeamData {
+ private:
+  HostThreadTeamDataSingleton();
+  ~HostThreadTeamDataSingleton();
+
+ public:
+  static HostThreadTeamData& singleton();
+};
+
+// Hack this as a partial specialization for now
+// TODO @tasking @cleanup DSH Make this the general class template and make the
+// old code the partial specialization
+template <class QueueType>
+class TaskQueueSpecialization<SimpleTaskScheduler<Kokkos::OpenMP, QueueType>> {
+ public:
+  using execution_space = Kokkos::OpenMP;
+  using scheduler_type  = SimpleTaskScheduler<Kokkos::OpenMP, QueueType>;
+  using member_type =
+      TaskTeamMemberAdapter<Kokkos::Impl::HostThreadTeamMember<execution_space>,
+                            scheduler_type>;
+  using memory_space = Kokkos::HostSpace;
+
+  enum : int { max_league_size = HostThreadTeamData::max_pool_members };
+
+  // Must provide task queue execution function
+  static void execute(scheduler_type const& scheduler) {
+    using task_base_type = typename scheduler_type::task_base_type;
+
+    // Unused; the ChaseLev queue still needs a worker ID even in the single
+    // case, so we need to use the thread data from inside the parallel
+    // region. Team size is fixed at 1 for now anyway.
+    // HostThreadTeamData& team_data_single =
+    // HostThreadTeamDataSingleton::singleton();
+
+    Impl::OpenMPInternal* instance =
+        execution_space().impl_internal_space_instance();
+    const int pool_size = get_max_team_count(scheduler.get_execution_space());
+
+    // TODO @tasking @new_feature DSH allow team sizes other than 1
+    const int team_size = 1;                      // Threads per core
+    instance->resize_thread_data(0,               /* global reduce buffer */
+                                 512 * team_size, /* team reduce buffer */
+                                 0,               /* team shared buffer */
+                                 0                /* thread local buffer */
+    );
+    assert(pool_size % team_size == 0);
+
+    auto& queue = scheduler.queue();
+
+    // queue.initialize_team_queues(pool_size / team_size);
+
+#pragma omp parallel num_threads(pool_size)
+    {
+      Impl::HostThreadTeamData& self = *(instance->get_thread_data());
+
+      // Organizing threads into a team performs a barrier across the
+      // entire pool to ensure proper initialization of the team
+      // rendezvous mechanism before a team rendezvous can be performed.
+
+      // organize_team() returns true if this is an active team member
+      if (self.organize_team(team_size)) {
+        member_type single_exec(scheduler, self);
+        member_type team_exec(scheduler, self);
+
+        auto& team_scheduler = team_exec.scheduler();
+
+        auto current_task = OptionalRef<task_base_type>(nullptr);
+
+        while (!queue.is_done()) {
+          // Each team lead attempts to acquire either a thread team task
+          // or a single thread task for the team.
+          if (team_exec.team_rank() == 0) {
+            // loop while both:
+            //   - the queue is not done
+            //   - the most recently popped task is a single task or empty
+            while (!queue.is_done()) {
+              current_task =
+                  queue.pop_ready_task(team_scheduler.team_scheduler_info());
+
+              if (current_task) {
+                if (current_task->is_team_runnable()) {
+                  // break out of the team leader loop to run the team task
+                  break;
+                } else {
+                  KOKKOS_ASSERT(current_task->is_single_runnable());
+                  current_task->as_runnable_task().run(single_exec);
+                  // Respawns are handled in the complete function
+                  queue.complete((*std::move(current_task)).as_runnable_task(),
+                                 team_scheduler.team_scheduler_info());
+                }
+
+              }  // end if current_task is not null
+
+              current_task = nullptr;
+
+            }  // end team leader loop
+          }
+
+          // Otherwise, make sure everyone in the team has the same task
+          team_exec.team_broadcast(current_task, 0);
+
+          if (current_task) {
+            KOKKOS_ASSERT(current_task->is_team_runnable());
+            current_task->as_runnable_task().run(team_exec);
+
+            if (team_exec.team_rank() == 0) {
+              // Respawns are handled in the complete function
+              queue.complete((*std::move(current_task)).as_runnable_task(),
+                             team_scheduler.team_scheduler_info());
+            }
+          }
+        }
+      }
+      self.disband_team();
+    }  // end pragma omp parallel
+  }
+
+  static uint32_t get_max_team_count(execution_space const& espace) {
+    return static_cast<uint32_t>(espace.impl_thread_pool_size());
+  }
+
+  // TODO @tasking @optimization DSH specialize this for trivially destructible
+  // types
+  template <typename TaskType>
+  static void get_function_pointer(typename TaskType::function_type& ptr,
+                                   typename TaskType::destroy_type& dtor) {
+    ptr  = TaskType::apply;
+    dtor = TaskType::destroy;
+  }
+};
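+
+// Rough usage sketch, assuming the standard Kokkos tasking API ('MyTask' and
+// the pool size are placeholders; this is not part of the upstream sources).
+// Either scheduler flavor in this file is driven the same way:
+//
+//   using scheduler_type = Kokkos::TaskScheduler<Kokkos::OpenMP>;
+//   typename scheduler_type::memory_pool pool(
+//       typename scheduler_type::memory_space{}, 1 << 20);
+//   scheduler_type sched(pool);
+//   // MyTask defines value_type and operator()(member_type&, value_type&)
+//   auto fut = Kokkos::host_spawn(Kokkos::TaskSingle(sched), MyTask{});
+//   Kokkos::wait(sched);  // drives execute() above on the OpenMP pool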
+
+template <class Scheduler>
+class TaskQueueSpecializationConstrained<
+    Scheduler,
+    std::enable_if_t<std::is_same<typename Scheduler::execution_space,
+                                  Kokkos::OpenMP>::value>> {
+ public:
+  using execution_space = Kokkos::OpenMP;
+  using scheduler_type  = Scheduler;
+  using member_type =
+      TaskTeamMemberAdapter<Kokkos::Impl::HostThreadTeamMember<execution_space>,
+                            scheduler_type>;
+  using memory_space = Kokkos::HostSpace;
+
+  enum : int { max_league_size = HostThreadTeamData::max_pool_members };
+
+  static void iff_single_thread_recursive_execute(
+      scheduler_type const& scheduler) {
+    using task_base_type = typename scheduler_type::task_base;
+    using queue_type     = typename scheduler_type::queue_type;
+
+    if (1 == OpenMP::impl_thread_pool_size()) {
+      task_base_type* const end = (task_base_type*)task_base_type::EndTag;
+
+      HostThreadTeamData& team_data_single =
+          HostThreadTeamDataSingleton::singleton();
+
+      member_type single_exec(scheduler, team_data_single);
+
+      task_base_type* task = end;
+
+      do {
+        task = end;
+
+        // Loop by priority and then type
+        for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
+          for (int j = 0; j < 2 && end == task; ++j) {
+            task =
+                queue_type::pop_ready_task(&scheduler.m_queue->m_ready[i][j]);
+          }
+        }
+
+        if (end == task) break;
+
+        (*task->m_apply)(task, &single_exec);
+
+        scheduler.m_queue->complete(task);
+
+      } while (1);
+    }
+  }
+
+  // Must provide task queue execution function
+  static void execute(scheduler_type const& scheduler) {
+    using task_base_type = typename scheduler_type::task_base;
+    using queue_type     = typename scheduler_type::queue_type;
+
+    static task_base_type* const end = (task_base_type*)task_base_type::EndTag;
+
+    constexpr task_base_type* no_more_tasks_sentinel = nullptr;
+
+    HostThreadTeamData& team_data_single =
+        HostThreadTeamDataSingleton::singleton();
+
+    Impl::OpenMPInternal* instance =
+        execution_space().impl_internal_space_instance();
+    const int pool_size = OpenMP::impl_thread_pool_size();
+
+    const int team_size = 1;       // Threads per core
+    instance->resize_thread_data(0,               /* global reduce buffer */
+                                 512 * team_size, /* team reduce buffer */
+                                 0,               /* team shared buffer */
+                                 0);              /* thread local buffer */
+    assert(pool_size % team_size == 0);
+    auto& queue = scheduler.queue();
+    queue.initialize_team_queues(pool_size / team_size);
+
+#pragma omp parallel num_threads(pool_size)
+    {
+      Impl::HostThreadTeamData& self = *(instance->get_thread_data());
+
+      // Organizing threads into a team performs a barrier across the
+      // entire pool to ensure proper initialization of the team
+      // rendezvous mechanism before a team rendezvous can be performed.
+
+      // organize_team() returns true if this is an active team member
+      if (self.organize_team(team_size)) {
+        member_type single_exec(scheduler, team_data_single);
+        member_type team_exec(scheduler, self);
+
+        auto& team_queue = team_exec.scheduler().queue();
+
+        // Loop until all queues are empty and no tasks in flight
+
+        task_base_type* task = no_more_tasks_sentinel;
+
+        do {
+          // Each team lead attempts to acquire either a thread team task
+          // or a single thread task for the team.
+
+          if (0 == team_exec.team_rank()) {
+            bool leader_loop = false;
+
+            do {
+              if (task != no_more_tasks_sentinel && task != end) {
+                // team member #0 completes the previously executed task;
+                // completion may delete the task
+                team_queue.complete(task);
+              }
+
+              // If 0 == m_ready_count then set task = 0
+
+              if (*((volatile int*)&team_queue.m_ready_count) > 0) {
+                task = end;
+                // Attempt to acquire a task
+                // Loop by priority and then type
+                for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
+                  for (int j = 0; j < 2 && end == task; ++j) {
+                    task =
+                        queue_type::pop_ready_task(&team_queue.m_ready[i][j]);
+                  }
+                }
+              } else {
+                // returns nullptr if and only if all other queues have a ready
+                // count of 0 also. Otherwise, returns a task from another queue
+                // or `end` if one couldn't be popped
+                task = team_queue.attempt_to_steal_task();
+              }
+
+              // If tasks are still executing and no task could be
+              // acquired, then continue this leader loop.
+              if (task == end) {
+                // this means that the ready task count was not zero, but we
+                // couldn't pop a task (because, for instance, someone else
+                // got there before us)
+                leader_loop = true;
+              } else if ((task != no_more_tasks_sentinel) &&
+                         (task_base_type::TaskSingle == task->m_task_type)) {
+                // if a single thread task then execute now
+
+                (*task->m_apply)(task, &single_exec);
+
+                leader_loop = true;
+              } else {
+                leader_loop = false;
+              }
+            } while (leader_loop);
+          }
+
+          // Team lead either found 0 == m_ready_count or a team task
+          // Team lead broadcast acquired task:
+
+          team_exec.team_broadcast(task, 0);
+
+          if (task != no_more_tasks_sentinel) {  // Thread Team Task
+
+            (*task->m_apply)(task, &team_exec);
+
+            // The m_apply function performs a barrier
+          }
+        } while (task != no_more_tasks_sentinel);
+      }
+      self.disband_team();
+    }  // end pragma omp parallel
+  }
+
+  template <typename TaskType>
+  static void get_function_pointer(typename TaskType::function_type& ptr,
+                                   typename TaskType::destroy_type& dtor) {
+    ptr  = TaskType::apply;
+    dtor = TaskType::destroy;
+  }
+};
+
+extern template class TaskQueue<Kokkos::OpenMP,
+                                typename Kokkos::OpenMP::memory_space>;
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_OPENMP_TASK_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Team.hpp b/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_Team.hpp
new file mode 100644 (file)
index 0000000..7353317
--- /dev/null
@@ -0,0 +1,359 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMP_TEAM_HPP
+#define KOKKOS_OPENMP_TEAM_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_OPENMP)
+
+#include <OpenMP/Kokkos_OpenMP_Instance.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class... Properties>
+class TeamPolicyInternal<Kokkos::OpenMP, Properties...>
+    : public PolicyTraits<Properties...> {
+ public:
+  //! Tag this class as a kokkos execution policy
+  using execution_policy = TeamPolicyInternal<OpenMP, Properties...>;
+
+  using traits = PolicyTraits<Properties...>;
+
+  const typename traits::execution_space& space() const {
+    static typename traits::execution_space m_space;
+    return m_space;
+  }
+
+  template <class ExecSpace, class... OtherProperties>
+  friend class TeamPolicyInternal;
+
+  template <class... OtherProperties>
+  TeamPolicyInternal(
+      const TeamPolicyInternal<Kokkos::OpenMP, OtherProperties...>& p) {
+    m_league_size            = p.m_league_size;
+    m_team_size              = p.m_team_size;
+    m_team_alloc             = p.m_team_alloc;
+    m_team_iter              = p.m_team_iter;
+    m_team_scratch_size[0]   = p.m_team_scratch_size[0];
+    m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
+    m_team_scratch_size[1]   = p.m_team_scratch_size[1];
+    m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
+    m_chunk_size             = p.m_chunk_size;
+    m_tune_team              = p.m_tune_team;
+    m_tune_vector            = p.m_tune_vector;
+  }
+  //----------------------------------------
+
+  template <class FunctorType>
+  int team_size_max(const FunctorType&, const ParallelForTag&) const {
+    int pool_size          = traits::execution_space::impl_thread_pool_size(1);
+    int max_host_team_size = Impl::HostThreadTeamData::max_team_members;
+    return pool_size < max_host_team_size ? pool_size : max_host_team_size;
+  }
+
+  int impl_vector_length() const { return 1; }
+
+  template <class FunctorType>
+  int team_size_max(const FunctorType&, const ParallelReduceTag&) const {
+    int pool_size          = traits::execution_space::impl_thread_pool_size(1);
+    int max_host_team_size = Impl::HostThreadTeamData::max_team_members;
+    return pool_size < max_host_team_size ? pool_size : max_host_team_size;
+  }
+  template <class FunctorType, class ReducerType>
+  inline int team_size_max(const FunctorType& f, const ReducerType&,
+                           const ParallelReduceTag& t) const {
+    return team_size_max(f, t);
+  }
+  template <class FunctorType>
+  int team_size_recommended(const FunctorType&, const ParallelForTag&) const {
+    return traits::execution_space::impl_thread_pool_size(2);
+  }
+  template <class FunctorType>
+  int team_size_recommended(const FunctorType&,
+                            const ParallelReduceTag&) const {
+    return traits::execution_space::impl_thread_pool_size(2);
+  }
+  template <class FunctorType, class ReducerType>
+  inline int team_size_recommended(const FunctorType& f, const ReducerType&,
+                                   const ParallelReduceTag& t) const {
+    return team_size_recommended(f, t);
+  }
+
+  inline static int vector_length_max() {
+    return 1024;
+  }  // Arbitrarily large; meant as an upper bound on a vectorizable length
+
+  inline static int scratch_size_max(int level) {
+    return (level == 0 ? 1024 * 32 :  // Roughly L1 size
+                20 * 1024 * 1024);    // Limit to keep compatibility with CUDA
+  }
+
+  //----------------------------------------
+
+ private:
+  int m_league_size;
+  int m_team_size;
+  int m_team_alloc;
+  int m_team_iter;
+
+  size_t m_team_scratch_size[2];
+  size_t m_thread_scratch_size[2];
+
+  int m_chunk_size;
+
+  bool m_tune_team;
+  bool m_tune_vector;
+
+  inline void init(const int league_size_request, const int team_size_request) {
+    const int pool_size  = traits::execution_space::impl_thread_pool_size(0);
+    const int team_grain = traits::execution_space::impl_thread_pool_size(2);
+    const int max_host_team_size = Impl::HostThreadTeamData::max_team_members;
+    const int team_max =
+        ((pool_size < max_host_team_size) ? pool_size : max_host_team_size);
+
+    m_league_size = league_size_request;
+
+    if (team_size_request > team_max)
+      Kokkos::abort("Kokkos::abort: Requested Team Size is too large!");
+    m_team_size = team_size_request < team_max ? team_size_request : team_max;
+
+    // Round team size up to a multiple of 'team_grain'
+    const int team_size_grain =
+        team_grain * ((m_team_size + team_grain - 1) / team_grain);
+    const int team_count = pool_size / team_size_grain;
+
+    // Constraint : pool_size = m_team_alloc * team_count
+    m_team_alloc = pool_size / team_count;
+
+    // Maximum number of iterations each team will take:
+    m_team_iter = (m_league_size + team_count - 1) / team_count;
+
+    set_auto_chunk_size();
+  }
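+
+  // Worked example of the rounding above (numbers assumed for illustration):
+  // with pool_size = 16, team_grain = 2, and team_size_request = 3, the
+  // allocation granularity rounds up to team_size_grain = 4, giving
+  // team_count = 4 and m_team_alloc = 4 threads reserved per team; a league
+  // of 10 then yields m_team_iter = ceil(10 / 4) = 3 iterations per team.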
+
+ public:
+  inline int team_size() const { return m_team_size; }
+  inline int league_size() const { return m_league_size; }
+  inline bool impl_auto_team_size() const { return m_tune_team; }
+  inline bool impl_auto_vector_length() const { return m_tune_vector; }
+  inline void impl_set_team_size(size_t new_team_size) {
+    m_team_size = new_team_size;
+  }
+  inline void impl_set_vector_length(size_t) {}
+  inline size_t scratch_size(const int& level, int team_size_ = -1) const {
+    if (team_size_ < 0) team_size_ = m_team_size;
+    return m_team_scratch_size[level] +
+           team_size_ * m_thread_scratch_size[level];
+  }
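+
+  // Illustrative sketch (byte counts assumed for illustration; not part of
+  // the upstream sources): scratch requested through the set_scratch_size()
+  // overloads defined later in this class feeds the per-level sum computed
+  // in scratch_size() above, e.g.
+  //
+  //   Kokkos::TeamPolicy<Kokkos::OpenMP> policy(nteams, Kokkos::AUTO);
+  //   policy.set_scratch_size(0, Kokkos::PerTeam(1024),
+  //                           Kokkos::PerThread(256));
+  //   // scratch_size(0) == 1024 + team_size * 256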
+
+  /** \brief  Specify league size, request team size */
+  TeamPolicyInternal(const typename traits::execution_space&,
+                     int league_size_request, int team_size_request,
+                     int /* vector_length_request */ = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0),
+        m_tune_team(false),
+        m_tune_vector(false) {
+    init(league_size_request, team_size_request);
+  }
+
+  TeamPolicyInternal(const typename traits::execution_space&,
+                     int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */
+                     ,
+                     int /* vector_length_request */ = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0),
+        m_tune_team(true),
+        m_tune_vector(false) {
+    init(league_size_request,
+         traits::execution_space::impl_thread_pool_size(2));
+  }
+
+  TeamPolicyInternal(const typename traits::execution_space&,
+                     int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */
+                     ,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0),
+        m_tune_team(true),
+        m_tune_vector(true) {
+    init(league_size_request,
+         traits::execution_space::impl_thread_pool_size(2));
+  }
+
+  TeamPolicyInternal(const typename traits::execution_space&,
+                     int league_size_request, const int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0),
+        m_tune_team(false),
+        m_tune_vector(true) {
+    init(league_size_request, team_size_request);
+  }
+
+  TeamPolicyInternal(int league_size_request, int team_size_request,
+                     int /* vector_length_request */ = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0),
+        m_tune_team(false),
+        m_tune_vector(false) {
+    init(league_size_request, team_size_request);
+  }
+
+  TeamPolicyInternal(int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */,
+                     int /* vector_length_request */ = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0),
+        m_tune_team(true),
+        m_tune_vector(false) {
+    init(league_size_request,
+         traits::execution_space::impl_thread_pool_size(2));
+  }
+
+  TeamPolicyInternal(int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0),
+        m_tune_team(true),
+        m_tune_vector(true) {
+    init(league_size_request,
+         traits::execution_space::impl_thread_pool_size(2));
+  }
+
+  TeamPolicyInternal(int league_size_request, int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0),
+        m_tune_team(false),
+        m_tune_vector(true) {
+    init(league_size_request, team_size_request);
+  }
+
+  inline int team_alloc() const { return m_team_alloc; }
+  inline int team_iter() const { return m_team_iter; }
+
+  inline int chunk_size() const { return m_chunk_size; }
+
+  /** \brief set chunk_size to a discrete value */
+  inline TeamPolicyInternal& set_chunk_size(
+      typename traits::index_type chunk_size_) {
+    m_chunk_size = chunk_size_;
+    return *this;
+  }
+
+  /** \brief set per team scratch size for a specific level of the scratch
+   * hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(const int& level,
+                                              const PerTeamValue& per_team) {
+    m_team_scratch_size[level] = per_team.value;
+    return *this;
+  }
+
+  /** \brief set per thread scratch size for a specific level of the scratch
+   * hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(
+      const int& level, const PerThreadValue& per_thread) {
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+  /** \brief set per thread and per team scratch size for a specific level of
+   * the scratch hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(
+      const int& level, const PerTeamValue& per_team,
+      const PerThreadValue& per_thread) {
+    m_team_scratch_size[level]   = per_team.value;
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
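+
+  // Illustrative usage sketch (hypothetical policy object): request 1 KiB per
+  // team plus 64 bytes per thread of level-0 scratch:
+  //   policy.set_scratch_size(0, Kokkos::PerTeam(1024), Kokkos::PerThread(64));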
+
+ private:
+  /** \brief finalize chunk_size if it was set to AUTO */
+  inline void set_auto_chunk_size() {
+    int concurrency =
+        traits::execution_space::impl_thread_pool_size(0) / m_team_alloc;
+    if (concurrency == 0) concurrency = 1;
+
+    if (m_chunk_size > 0) {
+      if (!Impl::is_integral_power_of_two(m_chunk_size))
+        Kokkos::abort("TeamPolicy blocking granularity must be power of two");
+    }
+
+    int new_chunk_size = 1;
+    while (new_chunk_size * 100 * concurrency < m_league_size)
+      new_chunk_size *= 2;
+    if (new_chunk_size < 128) {
+      new_chunk_size = 1;
+      while ((new_chunk_size * 40 * concurrency < m_league_size) &&
+             (new_chunk_size < 128))
+        new_chunk_size *= 2;
+    }
+    m_chunk_size = new_chunk_size;
+  }
+
+ public:
+  using member_type = Impl::HostThreadTeamMember<Kokkos::OpenMP>;
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
+#endif /* KOKKOS_OPENMP_TEAM_HPP */
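
The sizing arithmetic in init() and the chunking heuristic in set_auto_chunk_size() above are easiest to follow with concrete numbers. A minimal stand-alone sketch, assuming a pool of 16 threads, a grain of 4, a requested team size of 6 and a league of 1000 (made-up inputs, not values queried from a real runtime):

    #include <cstdio>

    int main() {
      const int pool_size  = 16;    // assumed impl_thread_pool_size(0)
      const int team_grain = 4;     // assumed impl_thread_pool_size(2)
      const int team_size  = 6;     // requested team size
      const int league     = 1000;  // requested league size

      // init(): round the team size up to a multiple of the grain.
      const int team_size_grain =
          team_grain * ((team_size + team_grain - 1) / team_grain);   // 8
      const int team_count = pool_size / team_size_grain;             // 2
      const int team_alloc = pool_size / team_count;                  // 8
      const int team_iter  = (league + team_count - 1) / team_count;  // 500

      // set_auto_chunk_size(): grow the chunk while 100 * concurrency chunks
      // do not yet cover the league; refine with factor 40 if the result is
      // below 128.
      int concurrency = pool_size / team_alloc;  // 2
      int chunk       = 1;
      while (chunk * 100 * concurrency < league) chunk *= 2;  // -> 8
      if (chunk < 128) {
        chunk = 1;
        while ((chunk * 40 * concurrency < league) && (chunk < 128))
          chunk *= 2;  // -> 16
      }
      std::printf("alloc=%d iter=%d chunk=%d\n", team_alloc, team_iter, chunk);
      return 0;
    }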
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_WorkGraphPolicy.hpp b/bundled/kokkos-3.7.00/core/src/OpenMP/Kokkos_OpenMP_WorkGraphPolicy.hpp
new file mode 100644 (file)
index 0000000..55d9c58
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMP_WORKGRAPHPOLICY_HPP
+#define KOKKOS_OPENMP_WORKGRAPHPOLICY_HPP
+
+#include <Kokkos_OpenMP.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
+                  Kokkos::OpenMP> {
+ private:
+  using Policy = Kokkos::WorkGraphPolicy<Traits...>;
+
+  Policy m_policy;
+  FunctorType m_functor;
+
+  template <class TagType>
+  std::enable_if_t<std::is_void<TagType>::value> exec_one(
+      const std::int32_t w) const noexcept {
+    m_functor(w);
+  }
+
+  template <class TagType>
+  std::enable_if_t<!std::is_void<TagType>::value> exec_one(
+      const std::int32_t w) const noexcept {
+    const TagType t{};
+    m_functor(t, w);
+  }
+
+ public:
+  inline void execute() {
+#pragma omp parallel num_threads(OpenMP::impl_thread_pool_size())
+    {
+      // Spin until COMPLETED_TOKEN.
+      // END_TOKEN indicates no work is currently available.
+
+      for (std::int32_t w = Policy::END_TOKEN;
+           Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
+        if (Policy::END_TOKEN != w) {
+          exec_one<typename Policy::work_tag>(w);
+          m_policy.completed_work(w);
+        }
+      }
+    }
+  }
+
+  inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_policy(arg_policy), m_functor(arg_functor) {}
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif /* #define KOKKOS_OPENMP_WORKGRAPHPOLICY_HPP */
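
The execute() loop above implements a token protocol: each thread keeps popping until COMPLETED_TOKEN arrives, and END_TOKEN only means nothing is runnable at this instant. A stand-alone analogue of that loop shape, using std::atomic and std::thread in place of WorkGraphPolicy (the token values and the toy queue are assumptions for illustration):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>
    #include <thread>
    #include <vector>

    // Hypothetical token values standing in for Policy::END_TOKEN and
    // Policy::COMPLETED_TOKEN.
    constexpr std::int32_t END_TOKEN       = -1;  // no work available right now
    constexpr std::int32_t COMPLETED_TOKEN = -2;  // all work has been completed

    constexpr std::int32_t total_work = 100;
    std::atomic<std::int32_t> next_item{0};

    // Toy stand-in for m_policy.pop_work(); the real policy also hands out
    // END_TOKEN while dependencies of the remaining items are still pending.
    std::int32_t pop_work() {
      const std::int32_t w = next_item.fetch_add(1);
      return (w < total_work) ? w : COMPLETED_TOKEN;
    }

    int main() {
      std::vector<std::thread> pool;
      for (int t = 0; t < 4; ++t)
        pool.emplace_back([] {
          // Same loop shape as the execute() above.
          for (std::int32_t w = END_TOKEN;
               COMPLETED_TOKEN != (w = pop_work());) {
            if (END_TOKEN != w) {
              /* exec_one(w): run the functor on item w */
            }
          }
        });
      for (auto& th : pool) th.join();
      std::printf("all %d items processed\n", total_work);
      return 0;
    }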
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTargetSpace.cpp b/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTargetSpace.cpp
new file mode 100644 (file)
index 0000000..5ff9bf3
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <algorithm>
+#include <omp.h>
+
+/*--------------------------------------------------------------------------*/
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <memory.h>
+
+#include <iostream>
+#include <sstream>
+#include <cstring>
+
+#include <Kokkos_OpenMPTarget.hpp>
+#include <Kokkos_OpenMPTargetSpace.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <Kokkos_Atomic.hpp>
+#include <impl/Kokkos_MemorySpace.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Experimental {
+/* Default allocation mechanism */
+OpenMPTargetSpace::OpenMPTargetSpace() {}
+
+void* OpenMPTargetSpace::impl_allocate(
+    const char* arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  static_assert(sizeof(void*) == sizeof(uintptr_t),
+                "Error sizeof(void*) != sizeof(uintptr_t)");
+
+  void* ptr;
+
+  ptr = omp_target_alloc(arg_alloc_size, omp_get_default_device());
+
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+  }
+
+  return ptr;
+}
+
+void* OpenMPTargetSpace::allocate(const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+
+void* OpenMPTargetSpace::allocate(const char* arg_label,
+                                  const size_t arg_alloc_size,
+                                  const size_t arg_logical_size) const {
+  return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+
+void OpenMPTargetSpace::impl_deallocate(
+    const char* arg_label, void* const arg_alloc_ptr,
+    const size_t arg_alloc_size, const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+                                      reported_size);
+  }
+  if (arg_alloc_ptr) {
+    omp_target_free(arg_alloc_ptr, omp_get_default_device());
+  }
+}
+
+void OpenMPTargetSpace::deallocate(void* const arg_alloc_ptr,
+                                   const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void OpenMPTargetSpace::deallocate(const char* arg_label,
+                                   void* const arg_alloc_ptr,
+                                   const size_t arg_alloc_size,
+                                   const size_t arg_logical_size) const {
+  impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+#ifdef KOKKOS_ENABLE_DEBUG
+SharedAllocationRecord<void, void> SharedAllocationRecord<
+    Kokkos::Experimental::OpenMPTargetSpace, void>::s_root_record;
+#endif
+
+SharedAllocationRecord<Kokkos::Experimental::OpenMPTargetSpace,
+                       void>::~SharedAllocationRecord() {
+  auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     alloc_size, (alloc_size - sizeof(SharedAllocationHeader)));
+}
+
+SharedAllocationRecord<Kokkos::Experimental::OpenMPTargetSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::OpenMPTargetSpace& arg_space,
+        const std::string& arg_label, const size_t arg_alloc_size,
+        const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::OpenMPTargetSpace,
+                                  void>::s_root_record,
+#endif
+          Kokkos::Impl::checked_allocation_with_header(arg_space, arg_label,
+                                                       arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+  SharedAllocationHeader header;
+
+  this->base_t::_fill_host_accessible_header_info(header, arg_label);
+
+  // DeepCopy the populated header into the device-side allocation.
+  Kokkos::Impl::DeepCopy<Experimental::OpenMPTargetSpace, HostSpace>(
+      RecordBase::m_alloc_ptr, &header, sizeof(SharedAllocationHeader));
+  Kokkos::fence(
+      "SharedAllocationRecord<Kokkos::Experimental::OpenMPTargetSpace, "
+      "void>::SharedAllocationRecord(): fence after copying header from "
+      "HostSpace");
+}
+
+//----------------------------------------------------------------------------
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+/*
+namespace Kokkos {
+namespace {
+  const unsigned HOST_SPACE_ATOMIC_MASK = 0xFFFF;
+  const unsigned HOST_SPACE_ATOMIC_XOR_MASK = 0x5A39;
+  static int HOST_SPACE_ATOMIC_LOCKS[HOST_SPACE_ATOMIC_MASK+1];
+}
+
+namespace Impl {
+void init_lock_array_host_space() {
+  static int is_initialized = 0;
+  if(! is_initialized)
+    for(int i = 0; i < static_cast<int> (HOST_SPACE_ATOMIC_MASK+1); i++)
+      HOST_SPACE_ATOMIC_LOCKS[i] = 0;
+}
+
+bool lock_address_host_space(void* ptr) {
+  return 0 == atomic_compare_exchange( &HOST_SPACE_ATOMIC_LOCKS[
+      (( size_t(ptr) >> 2 ) & HOST_SPACE_ATOMIC_MASK) ^
+HOST_SPACE_ATOMIC_XOR_MASK] , 0 , 1);
+}
+
+void unlock_address_host_space(void* ptr) {
+   atomic_exchange( &HOST_SPACE_ATOMIC_LOCKS[
+      (( size_t(ptr) >> 2 ) & HOST_SPACE_ATOMIC_MASK) ^
+HOST_SPACE_ATOMIC_XOR_MASK] , 0);
+}
+
+}
+}*/
+
+//==============================================================================
+// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
+
+#include <impl/Kokkos_SharedAlloc_timpl.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// To avoid additional compilation cost for something that's (mostly?) not
+// performance sensitive, we explicitly instantiate these CRTP base classes
+// here, where we have access to the associated *_timpl.hpp header files.
+template class HostInaccessibleSharedAllocationRecordCommon<
+    Kokkos::Experimental::OpenMPTargetSpace>;
+template class SharedAllocationRecordCommon<
+    Kokkos::Experimental::OpenMPTargetSpace>;
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
+//==============================================================================
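
impl_allocate() and impl_deallocate() above are thin wrappers over the OpenMP device-memory runtime. A minimal sketch of the same allocate/copy/free cycle against that runtime directly (device numbers and sizes are assumed, and the Kokkos profiling hooks are omitted):

    #include <omp.h>

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      const int host_dev    = omp_get_initial_device();
      const int dev         = omp_get_default_device();
      const std::size_t n     = 256;
      const std::size_t bytes = n * sizeof(int);

      std::vector<int> host(n, 42);

      // Mirrors OpenMPTargetSpace::impl_allocate().
      int* device_ptr = static_cast<int*>(omp_target_alloc(bytes, dev));
      if (device_ptr == nullptr) return 1;

      // Host-to-device copy, as in the header copy in SharedAllocationRecord.
      omp_target_memcpy(device_ptr, host.data(), bytes,
                        /*dst_offset=*/0, /*src_offset=*/0, dev, host_dev);

      // Mirrors OpenMPTargetSpace::impl_deallocate().
      omp_target_free(device_ptr, dev);
      std::printf("allocated, copied, and freed %zu bytes on device %d\n",
                  bytes, dev);
      return 0;
    }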
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Abort.hpp b/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Abort.hpp
new file mode 100644 (file)
index 0000000..ff07ce4
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMPTARGET_ABORT_HPP
+#define KOKKOS_OPENMPTARGET_ABORT_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <cstdio>   // fprintf
+#include <cstdlib>  // std::abort
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+
+namespace Kokkos {
+namespace Impl {
+
+KOKKOS_INLINE_FUNCTION void OpenMPTarget_abort(char const *msg) {
+  fprintf(stderr, "%s.\n", msg);
+  std::abort();
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_ENABLE_OPENMPTARGET
+#endif  /* KOKKOS_OPENMPTARGET_ABORT_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Error.hpp b/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Error.hpp
new file mode 100644 (file)
index 0000000..1ca3063
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMPTARGET_ERROR_HPP
+#define KOKKOS_OPENMPTARGET_ERROR_HPP
+
+#include <impl/Kokkos_Error.hpp>
+#include <sstream>
+
+namespace Kokkos {
+namespace Impl {
+
+inline void ompt_internal_safe_call(int e, const char* name,
+                                    const char* file = nullptr,
+                                    const int line   = 0) {
+  if (e != 0) {
+    std::ostringstream out;
+    out << name << " return value of " << e << " indicates failure";
+    if (file) {
+      out << " " << file << ":" << line;
+    }
+    throw_runtime_exception(out.str());
+  }
+}
+
+#define OMPT_SAFE_CALL(call) \
+  Kokkos::Impl::ompt_internal_safe_call(call, #call, __FILE__, __LINE__)
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  /* KOKKOS_OPENMPTARGET_ERROR_HPP */
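
OMPT_SAFE_CALL routes a nonzero return code into throw_runtime_exception together with the textual call and the call site. A stand-alone analogue under assumed names (safe_call, SAFE_CALL and the failing call are hypothetical stand-ins, not Kokkos API):

    #include <cstdio>
    #include <sstream>
    #include <stdexcept>
    #include <string>

    // Stand-in for Kokkos::Impl::throw_runtime_exception.
    [[noreturn]] void throw_runtime_exception(const std::string& msg) {
      throw std::runtime_error(msg);
    }

    // Same shape as ompt_internal_safe_call()/OMPT_SAFE_CALL above.
    void safe_call(int e, const char* name, const char* file, const int line) {
      if (e != 0) {
        std::ostringstream out;
        out << name << " return value of " << e << " indicates failure";
        out << " " << file << ":" << line;
        throw_runtime_exception(out.str());
      }
    }
    #define SAFE_CALL(call) safe_call(call, #call, __FILE__, __LINE__)

    int failing_runtime_call() { return 7; }  // hypothetical failing call

    int main() {
      try {
        SAFE_CALL(failing_runtime_call());
      } catch (const std::exception& ex) {
        std::printf("caught: %s\n", ex.what());
      }
      return 0;
    }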
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Exec.cpp b/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Exec.cpp
new file mode 100644 (file)
index 0000000..d3bec5a
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <stdio.h>
+#include <limits>
+#include <iostream>
+#include <vector>
+#include <Kokkos_Core.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_CPUDiscovery.hpp>
+#include <impl/Kokkos_Tools.hpp>
+
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+
+// FIXME_OPENMPTARGET currently unused
+/*
+namespace Kokkos {
+namespace Impl {
+namespace {
+
+KOKKOS_INLINE_FUNCTION
+int kokkos_omp_in_parallel();
+
+KOKKOS_INLINE_FUNCTION
+int kokkos_omp_in_parallel() { return omp_in_parallel(); }
+
+bool s_using_hwloc = false;
+
+}  // namespace
+}  // namespace Impl
+}  // namespace Kokkos
+*/
+
+namespace Kokkos {
+namespace Impl {
+
+void OpenMPTargetExec::verify_is_process(const char* const label) {
+  // Fails if the current task is in a parallel region or is not on the host.
+  if (omp_in_parallel() && (!omp_is_initial_device())) {
+    std::string msg(label);
+    msg.append(" ERROR: in parallel or on device");
+    Kokkos::Impl::throw_runtime_exception(msg);
+  }
+}
+
+void OpenMPTargetExec::verify_initialized(const char* const label) {
+  if (0 == Kokkos::Experimental::OpenMPTarget().impl_is_initialized()) {
+    std::string msg(label);
+    msg.append(" ERROR: not initialized");
+    Kokkos::Impl::throw_runtime_exception(msg);
+  }
+}
+
+void* OpenMPTargetExec::m_scratch_ptr         = nullptr;
+int64_t OpenMPTargetExec::m_scratch_size      = 0;
+int* OpenMPTargetExec::m_lock_array           = nullptr;
+int64_t OpenMPTargetExec::m_lock_size         = 0;
+uint32_t* OpenMPTargetExec::m_uniquetoken_ptr = nullptr;
+
+void OpenMPTargetExec::clear_scratch() {
+  Kokkos::Experimental::OpenMPTargetSpace space;
+  space.deallocate(m_scratch_ptr, m_scratch_size);
+  m_scratch_ptr  = nullptr;
+  m_scratch_size = 0;
+}
+
+void OpenMPTargetExec::clear_lock_array() {
+  if (m_lock_array != nullptr) {
+    Kokkos::Experimental::OpenMPTargetSpace space;
+    space.deallocate(m_lock_array, m_lock_size);
+    m_lock_array = nullptr;
+    m_lock_size  = 0;
+  }
+}
+
+void* OpenMPTargetExec::get_scratch_ptr() { return m_scratch_ptr; }
+
+void OpenMPTargetExec::resize_scratch(int64_t team_size, int64_t shmem_size_L0,
+                                      int64_t shmem_size_L1,
+                                      int64_t league_size) {
+  Kokkos::Experimental::OpenMPTargetSpace space;
+  const int64_t shmem_size =
+      shmem_size_L0 + shmem_size_L1;  // L0 + L1 scratch memory per team.
+  const int64_t padding = shmem_size * 10 / 100;  // Padding per team.
+  // The total amount of scratch memory allocated depends
+  // on the maximum number of in-flight teams possible.
+  int64_t total_size =
+      (shmem_size + OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE + padding) *
+      std::min(MAX_ACTIVE_THREADS / team_size, league_size);
+
+  if (total_size > m_scratch_size) {
+    space.deallocate(m_scratch_ptr, m_scratch_size);
+    m_scratch_size = total_size;
+    m_scratch_ptr  = space.allocate(total_size);
+  }
+}
+
+int* OpenMPTargetExec::get_lock_array(int num_teams) {
+  Kokkos::Experimental::OpenMPTargetSpace space;
+  int max_active_league_size = MAX_ACTIVE_THREADS / 32;
+  int lock_array_elem =
+      (num_teams > max_active_league_size) ? num_teams : max_active_league_size;
+  if (m_lock_size < (lock_array_elem * sizeof(int))) {
+    space.deallocate(m_lock_array, m_lock_size);
+    m_lock_size  = lock_array_elem * sizeof(int);
+    m_lock_array = static_cast<int*>(space.allocate(m_lock_size));
+
+    // FIXME_OPENMPTARGET - Creating a target region here to initialize the
+    // lock_array with 0's fails. Hence we create an equivalent host array,
+    // whose values are then copied to the lock_array.
+    int* h_lock_array = static_cast<int*>(
+        omp_target_alloc(m_lock_size, omp_get_initial_device()));
+
+    for (int i = 0; i < lock_array_elem; ++i) h_lock_array[i] = 0;
+
+    OMPT_SAFE_CALL(omp_target_memcpy(m_lock_array, h_lock_array, m_lock_size, 0,
+                                     0, omp_get_default_device(),
+                                     omp_get_initial_device()));
+
+    omp_target_free(h_lock_array, omp_get_initial_device());
+  }
+
+  return m_lock_array;
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_ENABLE_OPENMPTARGET
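
resize_scratch() sizes the arena for the maximum number of in-flight teams rather than for the whole league. A sketch of that arithmetic with assumed inputs (none of these values come from a real device):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assumed inputs, for illustration only.
      const int64_t team_size   = 128;
      const int64_t shmem_L0    = 16 * 1024;
      const int64_t shmem_L1    = 64 * 1024;
      const int64_t league_size = 100000;
      const int64_t max_active_threads = 2080 * 80;  // MAX_ACTIVE_THREADS above
      const int64_t team_reduce_size   = 512;        // TEAM_REDUCE_SIZE above

      const int64_t shmem   = shmem_L0 + shmem_L1;
      const int64_t padding = shmem * 10 / 100;  // 10% padding per team
      const int64_t in_flight =
          std::min(max_active_threads / team_size, league_size);  // 1300
      const int64_t total = (shmem + team_reduce_size + padding) * in_flight;
      std::printf("in-flight teams: %lld, scratch bytes: %lld\n",
                  static_cast<long long>(in_flight),
                  static_cast<long long>(total));
      return 0;
    }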
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Exec.hpp b/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Exec.hpp
new file mode 100644 (file)
index 0000000..52f5dcb
--- /dev/null
@@ -0,0 +1,1964 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMPTARGETEXEC_HPP
+#define KOKKOS_OPENMPTARGETEXEC_HPP
+
+#include <impl/Kokkos_Traits.hpp>
+#include <impl/Kokkos_Spinwait.hpp>
+
+#include <Kokkos_Atomic.hpp>
+#include "Kokkos_OpenMPTarget_Abort.hpp"
+
+// FIXME_OPENMPTARGET - Using this macro to implement a workaround for
+// hierarchical reducers. It avoids hitting the code path we originally
+// intended, which does not work. undef'ed at the end.
+// Intel compilers prefer the non-workaround version.
+#ifndef KOKKOS_ARCH_INTEL_GPU
+#define KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
+#endif
+
+// FIXME_OPENMPTARGET - Using this macro to implement a workaround for
+// hierarchical scan. It avoids hitting the code path we originally
+// intended, which does not work. undef'ed at the end.
+#ifndef KOKKOS_ARCH_INTEL_GPU
+#define KOKKOS_IMPL_TEAM_SCAN_WORKAROUND
+#endif
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class Reducer>
+struct OpenMPTargetReducerWrapper {
+  using value_type = typename Reducer::value_type;
+
+  // Using a generic unknown Reducer for the OpenMPTarget backend is not
+  // implemented.
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type&, const value_type&) = delete;
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type&, const volatile value_type&) = delete;
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type&) = delete;
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<Sum<Scalar, Space>> {
+ public:
+  // Required
+  using value_type = std::remove_cv_t<Scalar>;
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) { dest += src; }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    dest += src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val = reduction_identity<value_type>::sum();
+  }
+};
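+
+// Illustrative sketch of the wrapper contract (hypothetical host-side use):
+// a backend init()s a private copy, accumulates into it, and join()s partial
+// results, e.g. for Sum:
+//   int tmp;
+//   OpenMPTargetReducerWrapper<Sum<int>>::init(tmp);  // tmp = 0
+//   tmp += 3; tmp += 4;
+//   int dest = 10;
+//   OpenMPTargetReducerWrapper<Sum<int>>::join(dest, tmp);  // dest = 17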
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<Prod<Scalar, Space>> {
+ public:
+  // Required
+  using value_type = std::remove_cv_t<Scalar>;
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) { dest *= src; }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    dest *= src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val = reduction_identity<value_type>::prod();
+  }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<Min<Scalar, Space>> {
+ public:
+  // Required
+  using value_type = std::remove_cv_t<Scalar>;
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    if (src < dest) dest = src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    if (src < dest) dest = src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val = reduction_identity<value_type>::min();
+  }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<Max<Scalar, Space>> {
+ public:
+  // Required
+  using value_type = std::remove_cv_t<Scalar>;
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    if (src > dest) dest = src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    if (src > dest) dest = src;
+  }
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val = reduction_identity<value_type>::max();
+  }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<LAnd<Scalar, Space>> {
+ public:
+  // Required
+  using value_type = std::remove_cv_t<Scalar>;
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    dest = dest && src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    dest = dest && src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val = reduction_identity<value_type>::land();
+  }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<LOr<Scalar, Space>> {
+ public:
+  // Required
+  using value_type = std::remove_cv_t<Scalar>;
+
+  using result_view_type = Kokkos::View<value_type, Space>;
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    dest = dest || src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    dest = dest || src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val = reduction_identity<value_type>::lor();
+  }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<BAnd<Scalar, Space>> {
+ public:
+  // Required
+  using value_type = std::remove_cv_t<Scalar>;
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    dest = dest & src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    dest = dest & src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val = reduction_identity<value_type>::band();
+  }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<BOr<Scalar, Space>> {
+ public:
+  // Required
+  using value_type = std::remove_cv_t<Scalar>;
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    dest = dest | src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    dest = dest | src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val = reduction_identity<value_type>::bor();
+  }
+};
+
+template <class Scalar, class Index, class Space>
+struct OpenMPTargetReducerWrapper<MinLoc<Scalar, Index, Space>> {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using value_type = ValLocScalar<scalar_type, index_type>;
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    if (src.val < dest.val) dest = src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    if (src.val < dest.val) dest = src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val.val = reduction_identity<scalar_type>::min();
+    val.loc = reduction_identity<index_type>::min();
+  }
+};
+
+template <class Scalar, class Index, class Space>
+struct OpenMPTargetReducerWrapper<MaxLoc<Scalar, Index, Space>> {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using value_type = ValLocScalar<scalar_type, index_type>;
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    if (src.val > dest.val) dest = src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    if (src.val > dest.val) dest = src;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val.val = reduction_identity<scalar_type>::max();
+    val.loc = reduction_identity<index_type>::min();
+  }
+};
+
+template <class Scalar, class Space>
+struct OpenMPTargetReducerWrapper<MinMax<Scalar, Space>> {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+
+ public:
+  // Required
+  using value_type = MinMaxScalar<scalar_type>;
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    if (src.min_val < dest.min_val) {
+      dest.min_val = src.min_val;
+    }
+    if (src.max_val > dest.max_val) {
+      dest.max_val = src.max_val;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    if (src.min_val < dest.min_val) {
+      dest.min_val = src.min_val;
+    }
+    if (src.max_val > dest.max_val) {
+      dest.max_val = src.max_val;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val.max_val = reduction_identity<scalar_type>::max();
+    val.min_val = reduction_identity<scalar_type>::min();
+  }
+};
+
+template <class Scalar, class Index, class Space>
+struct OpenMPTargetReducerWrapper<MinMaxLoc<Scalar, Index, Space>> {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using value_type = MinMaxLocScalar<scalar_type, index_type>;
+
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    if (src.min_val < dest.min_val) {
+      dest.min_val = src.min_val;
+      dest.min_loc = src.min_loc;
+    }
+    if (src.max_val > dest.max_val) {
+      dest.max_val = src.max_val;
+      dest.max_loc = src.max_loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    if (src.min_val < dest.min_val) {
+      dest.min_val = src.min_val;
+      dest.min_loc = src.min_loc;
+    }
+    if (src.max_val > dest.max_val) {
+      dest.max_val = src.max_val;
+      dest.max_loc = src.max_loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val.max_val = reduction_identity<scalar_type>::max();
+    val.min_val = reduction_identity<scalar_type>::min();
+    val.max_loc = reduction_identity<index_type>::min();
+    val.min_loc = reduction_identity<index_type>::min();
+  }
+};
+
+//
+// specialize for MaxFirstLoc
+//
+template <class Scalar, class Index, class Space>
+struct OpenMPTargetReducerWrapper<MaxFirstLoc<Scalar, Index, Space>> {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using value_type = ValLocScalar<scalar_type, index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but Intel compiler
+// fails without it
+#pragma omp declare target
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    if (dest.val < src.val) {
+      dest = src;
+    } else if (!(src.val < dest.val)) {
+      dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    if (dest.val < src.val) {
+      dest = src;
+    } else if (!(src.val < dest.val)) {
+      dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val.val = reduction_identity<scalar_type>::max();
+    val.loc = reduction_identity<index_type>::min();
+  }
+#pragma omp end declare target
+};
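+
+// Worked tie-break example (hypothetical values): joining src = {val: 5,
+// loc: 7} into dest = {val: 5, loc: 3} leaves dest at {5, 3}; for equal
+// values MaxFirstLoc keeps the smaller (first) location, whereas MaxLoc
+// above replaces dest only when src.val is strictly larger.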
+
+//
+// specialize for MinFirstLoc
+//
+template <class Scalar, class Index, class Space>
+struct OpenMPTargetReducerWrapper<MinFirstLoc<Scalar, Index, Space>> {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using value_type = ValLocScalar<scalar_type, index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but Intel compiler
+// fails without it
+#pragma omp declare target
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    if (src.val < dest.val) {
+      dest = src;
+    } else if (!(dest.val < src.val)) {
+      dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    if (src.val < dest.val) {
+      dest = src;
+    } else if (!(dest.val < src.val)) {
+      dest.loc = (src.loc < dest.loc) ? src.loc : dest.loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val.val = reduction_identity<scalar_type>::min();
+    val.loc = reduction_identity<index_type>::min();
+  }
+#pragma omp end declare target
+};
+
+//
+// specialize for MinMaxFirstLastLoc
+//
+template <class Scalar, class Index, class Space>
+struct OpenMPTargetReducerWrapper<MinMaxFirstLastLoc<Scalar, Index, Space>> {
+ private:
+  using scalar_type = std::remove_cv_t<Scalar>;
+  using index_type  = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using value_type = MinMaxLocScalar<scalar_type, index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but Intel compiler
+// fails without it
+#pragma omp declare target
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    if (src.min_val < dest.min_val) {
+      dest.min_val = src.min_val;
+      dest.min_loc = src.min_loc;
+    } else if (!(dest.min_val < src.min_val)) {
+      dest.min_loc = (src.min_loc < dest.min_loc) ? src.min_loc : dest.min_loc;
+    }
+
+    if (dest.max_val < src.max_val) {
+      dest.max_val = src.max_val;
+      dest.max_loc = src.max_loc;
+    } else if (!(src.max_val < dest.max_val)) {
+      dest.max_loc = (src.max_loc > dest.max_loc) ? src.max_loc : dest.max_loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    if (src.min_val < dest.min_val) {
+      dest.min_val = src.min_val;
+      dest.min_loc = src.min_loc;
+    } else if (!(dest.min_val < src.min_val)) {
+      dest.min_loc = (src.min_loc < dest.min_loc) ? src.min_loc : dest.min_loc;
+    }
+
+    if (dest.max_val < src.max_val) {
+      dest.max_val = src.max_val;
+      dest.max_loc = src.max_loc;
+    } else if (!(src.max_val < dest.max_val)) {
+      dest.max_loc = (src.max_loc > dest.max_loc) ? src.max_loc : dest.max_loc;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val.max_val = reduction_identity<scalar_type>::max();
+    val.min_val = reduction_identity<scalar_type>::min();
+    val.max_loc = reduction_identity<index_type>::max();
+    val.min_loc = reduction_identity<index_type>::min();
+  }
+#pragma omp end declare target
+};
+
+//
+// specialize for FirstLoc
+//
+template <class Index, class Space>
+struct OpenMPTargetReducerWrapper<FirstLoc<Index, Space>> {
+ private:
+  using index_type = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using value_type = FirstLocScalar<index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but Intel compiler
+// fails without it
+#pragma omp declare target
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    dest.min_loc_true = (src.min_loc_true < dest.min_loc_true)
+                            ? src.min_loc_true
+                            : dest.min_loc_true;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    dest.min_loc_true = (src.min_loc_true < dest.min_loc_true)
+                            ? src.min_loc_true
+                            : dest.min_loc_true;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val.min_loc_true = reduction_identity<index_type>::min();
+  }
+#pragma omp end declare target
+};
+
+//
+// specialize for LastLoc
+//
+template <class Index, class Space>
+struct OpenMPTargetReducerWrapper<LastLoc<Index, Space>> {
+ private:
+  using index_type = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using value_type = LastLocScalar<index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but Intel compiler
+// fails without it
+#pragma omp declare target
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    dest.max_loc_true = (src.max_loc_true > dest.max_loc_true)
+                            ? src.max_loc_true
+                            : dest.max_loc_true;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    dest.max_loc_true = (src.max_loc_true > dest.max_loc_true)
+                            ? src.max_loc_true
+                            : dest.max_loc_true;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val.max_loc_true = reduction_identity<index_type>::max();
+  }
+#pragma omp end declare target
+};
+
+//
+// specialize for StdIsPartitioned
+//
+template <class Index, class Space>
+struct OpenMPTargetReducerWrapper<StdIsPartitioned<Index, Space>> {
+ private:
+  using index_type = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using value_type = StdIsPartScalar<index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but Intel compiler
+// fails without it
+#pragma omp declare target
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    dest.max_loc_true = (dest.max_loc_true < src.max_loc_true)
+                            ? src.max_loc_true
+                            : dest.max_loc_true;
+
+    dest.min_loc_false = (dest.min_loc_false < src.min_loc_false)
+                             ? dest.min_loc_false
+                             : src.min_loc_false;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    dest.max_loc_true = (dest.max_loc_true < src.max_loc_true)
+                            ? src.max_loc_true
+                            : dest.max_loc_true;
+
+    dest.min_loc_false = (dest.min_loc_false < src.min_loc_false)
+                             ? dest.min_loc_false
+                             : src.min_loc_false;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val.max_loc_true  = ::Kokkos::reduction_identity<index_type>::max();
+    val.min_loc_false = ::Kokkos::reduction_identity<index_type>::min();
+  }
+#pragma omp end declare target
+};
+
+//
+// specialize for StdPartitionPoint
+//
+template <class Index, class Space>
+struct OpenMPTargetReducerWrapper<StdPartitionPoint<Index, Space>> {
+ private:
+  using index_type = std::remove_cv_t<Index>;
+
+ public:
+  // Required
+  using value_type = StdPartPointScalar<index_type>;
+
+// WORKAROUND OPENMPTARGET
+// This pragma omp declare target should not be necessary, but Intel compiler
+// fails without it
+#pragma omp declare target
+  // Required
+  KOKKOS_INLINE_FUNCTION
+  static void join(value_type& dest, const value_type& src) {
+    dest.min_loc_false = (dest.min_loc_false < src.min_loc_false)
+                             ? dest.min_loc_false
+                             : src.min_loc_false;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void join(volatile value_type& dest, const volatile value_type& src) {
+    dest.min_loc_false = (dest.min_loc_false < src.min_loc_false)
+                             ? dest.min_loc_false
+                             : src.min_loc_false;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void init(value_type& val) {
+    val.min_loc_false = ::Kokkos::reduction_identity<index_type>::min();
+  }
+#pragma omp end declare target
+};
+
+/*
+template<class ReducerType>
+class OpenMPTargetReducerWrapper {
+  public:
+    const ReducerType& reducer;
+    using value_type = typename ReducerType::value_type;
+    value_type& value;
+
+    KOKKOS_INLINE_FUNCTION
+    void join(const value_type& upd) {
+      reducer.join(value,upd);
+    }
+
+    KOKKOS_INLINE_FUNCTION
+    void init(const value_type& upd) {
+      reducer.init(value,upd);
+    }
+};*/
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+/** \brief  Data for OpenMPTarget thread execution */
+
+class OpenMPTargetExec {
+ public:
+  // FIXME_OPENMPTARGET - Currently the maximum number of
+  // teams possible is calculated based on NVIDIA's Volta GPU. In the
+  // future, this value should be based on the chosen architecture for the
+  // OpenMPTarget backend.
+  enum { MAX_ACTIVE_THREADS = 2080 * 80 };
+  enum { MAX_ACTIVE_TEAMS = MAX_ACTIVE_THREADS / 32 };
+
+ private:
+  static void* scratch_ptr;
+
+ public:
+  static void verify_is_process(const char* const);
+  static void verify_initialized(const char* const);
+
+  static int* get_lock_array(int num_teams);
+  static void* get_scratch_ptr();
+  static void clear_scratch();
+  static void clear_lock_array();
+  static void resize_scratch(int64_t team_reduce_bytes,
+                             int64_t team_shared_bytes,
+                             int64_t thread_local_bytes, int64_t league_size);
+
+  static void* m_scratch_ptr;
+  static int64_t m_scratch_size;
+  static int* m_lock_array;
+  static int64_t m_lock_size;
+  static uint32_t* m_uniquetoken_ptr;
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+class OpenMPTargetExecTeamMember {
+ public:
+  enum { TEAM_REDUCE_SIZE = 512 };
+
+  /** \brief  Thread states for team synchronization */
+  enum { Active = 0, Rendezvous = 1 };
+
+  using execution_space      = Kokkos::Experimental::OpenMPTarget;
+  using scratch_memory_space = execution_space::scratch_memory_space;
+
+  scratch_memory_space m_team_shared;
+  size_t m_team_scratch_size[2];
+  int m_team_rank;
+  int m_team_size;
+  int m_league_rank;
+  int m_league_size;
+  int m_vector_length;
+  int m_vector_lane;
+  int m_shmem_block_index;
+  void* m_glb_scratch;
+  void* m_reduce_scratch;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& team_shmem() const {
+    return m_team_shared.set_team_thread_mode(0, 1, 0);
+  }
+
+  // Parameters of the set_team_thread_mode routine:
+  //   first parameter  - scratch level.
+  //   second parameter - size multiplier for advancing the scratch ptr after
+  //                      a request was serviced.
+  //   third parameter  - offset size multiplier from the current scratch ptr
+  //                      when returning a ptr for a request.
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& team_scratch(int level) const {
+    return m_team_shared.set_team_thread_mode(level, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& thread_scratch(int level) const {
+    return m_team_shared.set_team_thread_mode(level, team_size(), team_rank());
+  }
+
+  KOKKOS_INLINE_FUNCTION int league_rank() const { return m_league_rank; }
+  KOKKOS_INLINE_FUNCTION int league_size() const { return m_league_size; }
+  KOKKOS_INLINE_FUNCTION int team_rank() const { return m_team_rank; }
+  KOKKOS_INLINE_FUNCTION int team_size() const { return m_team_size; }
+  KOKKOS_INLINE_FUNCTION void* impl_reduce_scratch() const {
+    return m_reduce_scratch;
+  }
+
+  KOKKOS_INLINE_FUNCTION void team_barrier() const {
+#pragma omp barrier
+  }
+
+  template <class ValueType>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(ValueType& value,
+                                             int thread_id) const {
+    // Make sure there is enough scratch space:
+    using type = std::conditional_t<(sizeof(ValueType) < TEAM_REDUCE_SIZE),
+                                    ValueType, void>;
+    type* team_scratch =
+        reinterpret_cast<type*>(static_cast<char*>(m_glb_scratch) +
+                                TEAM_REDUCE_SIZE * omp_get_team_num());
+#pragma omp barrier
+    if (team_rank() == thread_id) *team_scratch = value;
+#pragma omp barrier
+    value = *team_scratch;
+  }
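+
+  // Illustrative call (hypothetical values): after
+  //   int v = (team_rank() == 2) ? 42 : 0;
+  //   member.team_broadcast(v, 2);
+  // every rank in the team observes v == 42.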
+
+  template <class Closure, class ValueType>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(const Closure& f, ValueType& value,
+                                             const int& thread_id) const {
+    f(value);
+    team_broadcast(value, thread_id);
+  }
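+  // Usage sketch (illustrative only; `member` and `compute_root_value` are
+  // placeholders):
+  //
+  //   int val = 0;
+  //   if (member.team_rank() == 0) val = compute_root_value();
+  //   member.team_broadcast(val, 0);  // all threads now hold rank 0's val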
+
+  // FIXME_OPENMPTARGET this function has the wrong interface and currently
+  // ignores the reducer passed.
+  template <class ValueType, class JoinOp>
+  KOKKOS_INLINE_FUNCTION ValueType team_reduce(const ValueType& value,
+                                               const JoinOp&) const {
+#pragma omp barrier
+
+    using value_type = ValueType;
+    //    const JoinLambdaAdapter<value_type, JoinOp> op(op_in);
+
+    // Make sure there is enough scratch space:
+    using type = std::conditional_t<(sizeof(value_type) < TEAM_REDUCE_SIZE),
+                                    value_type, void>;
+
+    const int n_values = TEAM_REDUCE_SIZE / sizeof(value_type);
+    type* team_scratch =
+        reinterpret_cast<type*>(static_cast<char*>(m_glb_scratch) +
+                                TEAM_REDUCE_SIZE * omp_get_team_num());
+    for (int i = m_team_rank; i < n_values; i += m_team_size) {
+      team_scratch[i] = value_type();
+    }
+
+#pragma omp barrier
+
+    for (int k = 0; k < m_team_size; k += n_values) {
+      if ((k <= m_team_rank) && (k + n_values > m_team_rank))
+        team_scratch[m_team_rank % n_values] += value;
+#pragma omp barrier
+    }
+
+    for (int d = 1; d < n_values; d *= 2) {
+      if ((m_team_rank + d < n_values) && (m_team_rank % (2 * d) == 0)) {
+        team_scratch[m_team_rank] += team_scratch[m_team_rank + d];
+      }
+#pragma omp barrier
+    }
+    return team_scratch[0];
+  }
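+  // Worked example (illustrative): for a team of size 4 where every thread
+  // contributes value = 1, the first loop zeroes the scratch slots, the
+  // windowed loop deposits one partial sum into slots 0..3, and the tree
+  // loop combines slots pairwise (d = 1, 2, 4, ...), so team_scratch[0]
+  // returns 4.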
+  /** \brief  Intra-team exclusive prefix sum with team_rank() ordering;
+   *          the inter-team accumulation order is non-deterministic.
+   *
+   *  The global inter-team accumulation value will, at the end of the
+   *  league's parallel execution, be the scan's total.
+   *  The parallel execution order of the league's teams is non-deterministic.
+   *  As such, the base value for each team's scan operation is similarly
+   *  non-deterministic.
+   */
+  template <typename ArgType>
+  KOKKOS_INLINE_FUNCTION ArgType
+  team_scan(const ArgType& /*value*/, ArgType* const /*global_accum*/) const {
+    // FIXME_OPENMPTARGET
+    /*  // Make sure there is enough scratch space:
+      using type =
+        std::conditional_t<(sizeof(ArgType) < TEAM_REDUCE_SIZE), ArgType, void>;
+
+      volatile type * const work_value  = ((type*) m_exec.scratch_thread());
+
+      *work_value = value ;
+
+      memory_fence();
+
+      if ( team_fan_in() ) {
+        // The last thread to synchronize returns true, all other threads wait
+      for team_fan_out()
+        // m_team_base[0]                 == highest ranking team member
+        // m_team_base[ m_team_size - 1 ] == lowest ranking team member
+        //
+        // 1) copy from lower to higher rank, initialize lowest rank to zero
+        // 2) prefix sum from lowest to highest rank, skipping lowest rank
+
+        type accum = 0 ;
+
+        if ( global_accum ) {
+          for ( int i = m_team_size ; i-- ; ) {
+            type & val = *((type*) m_exec.pool_rev( m_team_base_rev + i
+      )->scratch_thread()); accum += val ;
+          }
+          accum = atomic_fetch_add( global_accum , accum );
+        }
+
+        for ( int i = m_team_size ; i-- ; ) {
+          type & val = *((type*) m_exec.pool_rev( m_team_base_rev + i
+      )->scratch_thread()); const type offset = accum ; accum += val ; val =
+      offset ;
+        }
+
+        memory_fence();
+      }
+
+      team_fan_out();
+
+      return *work_value ;*/
+    return ArgType();
+  }
+
+  /** \brief  Intra-team exclusive prefix sum with team_rank() ordering.
+   *
+   *  The highest rank thread can compute the reduction total as
+   *    reduction_total = dev.team_scan( value ) + value ;
+   */
+  template <typename Type>
+  KOKKOS_INLINE_FUNCTION Type team_scan(const Type& value) const {
+    return this->template team_scan<Type>(value, 0);
+  }
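+  // Semantics sketch (illustrative): an exclusive scan over per-thread
+  // values {1, 2, 3, 4} ordered by team_rank() yields {0, 1, 3, 6}; the
+  // highest rank recovers the total as team_scan(value) + value = 6 + 4 =
+  // 10. Note that the implementation above is still a FIXME stub that
+  // returns a default-constructed value.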
+
+  //----------------------------------------
+  // Private for the driver
+
+ private:
+  using space = execution_space::scratch_memory_space;
+
+ public:
+  // FIXME_OPENMPTARGET - 512 (16*32) bytes at the beginning of the scratch
+  // space for each league are reserved for reduction. The size should
+  // actually be based on the ValueType of the reduction variable.
+  inline OpenMPTargetExecTeamMember(
+      const int league_rank, const int league_size, const int team_size,
+      const int vector_length  // const TeamPolicyInternal< OpenMPTarget,
+                               // Properties ...> & team
+      ,
+      void* const glb_scratch, const int shmem_block_index,
+      const size_t shmem_size_L0, const size_t shmem_size_L1)
+      : m_team_scratch_size{shmem_size_L0, shmem_size_L1},
+        m_team_rank(0),
+        m_team_size(team_size),
+        m_league_rank(league_rank),
+        m_league_size(league_size),
+        m_vector_length(vector_length),
+        m_shmem_block_index(shmem_block_index),
+        m_glb_scratch(glb_scratch) {
+    const int omp_tid = omp_get_thread_num();
+
+    // The scratch memory allocated is a sum of TEAM_REDUCE_SIZE, L0 shmem size
+    // and L1 shmem size. TEAM_REDUCE_SIZE = 512 bytes saved per team for
+    // hierarchical reduction. There is an additional 10% of the requested
+    // scratch memory allocated per team as padding. Hence the product with 0.1.
+    const int reduce_offset =
+        m_shmem_block_index *
+        (shmem_size_L0 + shmem_size_L1 +
+         ((shmem_size_L0 + shmem_size_L1) * 0.1) + TEAM_REDUCE_SIZE);
+    const int l0_offset = reduce_offset + TEAM_REDUCE_SIZE;
+    const int l1_offset = l0_offset + shmem_size_L0;
+    m_team_shared       = scratch_memory_space(
+        (static_cast<char*>(glb_scratch) + l0_offset), shmem_size_L0,
+        static_cast<char*>(glb_scratch) + l1_offset, shmem_size_L1);
+    m_reduce_scratch = static_cast<char*>(glb_scratch) + reduce_offset;
+    m_league_rank    = league_rank;
+    m_team_rank      = omp_tid;
+    m_vector_lane    = 0;
+  }
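+  // Worked example for the offset arithmetic (illustrative): with
+  // shmem_size_L0 = 1024, shmem_size_L1 = 2048 and m_shmem_block_index = 1,
+  // a team block spans 512 + 3072 + 307 (10% padding) bytes, giving
+  // reduce_offset = 3891, l0_offset = 4403 and l1_offset = 5427 into
+  // glb_scratch.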
+
+  static inline int team_reduce_size() { return TEAM_REDUCE_SIZE; }
+};
+
+template <class... Properties>
+class TeamPolicyInternal<Kokkos::Experimental::OpenMPTarget, Properties...>
+    : public PolicyTraits<Properties...> {
+ public:
+  //! Tag this class as a kokkos execution policy
+  using execution_policy = TeamPolicyInternal;
+
+  using traits = PolicyTraits<Properties...>;
+
+  //----------------------------------------
+
+  template <class FunctorType>
+  inline static int team_size_max(const FunctorType&, const ParallelForTag&) {
+    return 256;
+  }
+
+  template <class FunctorType>
+  inline static int team_size_max(const FunctorType&,
+                                  const ParallelReduceTag&) {
+    return 256;
+  }
+
+  template <class FunctorType, class ReducerType>
+  inline static int team_size_max(const FunctorType&, const ReducerType&,
+                                  const ParallelReduceTag&) {
+    return 256;
+  }
+
+  template <class FunctorType>
+  inline static int team_size_recommended(const FunctorType&,
+                                          const ParallelForTag&) {
+    return 128;
+  }
+
+  template <class FunctorType>
+  inline static int team_size_recommended(const FunctorType&,
+                                          const ParallelReduceTag&) {
+    return 128;
+  }
+
+  template <class FunctorType, class ReducerType>
+  inline static int team_size_recommended(const FunctorType&,
+                                          const ReducerType&,
+                                          const ParallelReduceTag&) {
+    return 128;
+  }
+
+  //----------------------------------------
+
+ private:
+  int m_league_size;
+  int m_team_size;
+  int m_vector_length;
+  int m_team_alloc;
+  int m_team_iter;
+  std::array<size_t, 2> m_team_scratch_size;
+  std::array<size_t, 2> m_thread_scratch_size;
+  bool m_tune_team_size;
+  bool m_tune_vector_length;
+  constexpr const static size_t default_team_size = 256;
+  int m_chunk_size;
+
+  inline void init(const int league_size_request, const int team_size_request,
+                   const int vector_length_request) {
+    m_league_size = league_size_request;
+
+    // Minimum team size should be 32 for OpenMPTarget backend.
+    if (team_size_request < 32) {
+      Kokkos::Impl::OpenMPTarget_abort(
+          "OpenMPTarget backend requires a minimum of 32 threads per team.\n");
+    } else
+      m_team_size = team_size_request;
+
+    m_vector_length = vector_length_request;
+    set_auto_chunk_size();
+  }
+
+  template <typename ExecSpace, typename... OtherProperties>
+  friend class TeamPolicyInternal;
+
+ public:
+  // FIXME_OPENMPTARGET : Currently this routine is a copy of the Cuda
+  // implementation, but this has to be tailored to be architecture specific.
+  inline static int scratch_size_max(int level) {
+    return (
+        level == 0 ? 1024 * 40 :  // 48kB is the max for CUDA, but we need some
+                                  // for team_member.reduce etc.
+            20 * 1024 *
+                1024);  // arbitrarily setting this to 20MB, for a Volta V100
+                        // that would give us about 3.2GB for 2 teams per SM
+  }
+  inline bool impl_auto_team_size() const { return m_tune_team_size; }
+  inline bool impl_auto_vector_length() const { return m_tune_vector_length; }
+  inline void impl_set_team_size(const size_t size) { m_team_size = size; }
+  inline void impl_set_vector_length(const size_t length) {
+    m_vector_length = length;
+  }
+  inline int impl_vector_length() const { return m_vector_length; }
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  KOKKOS_DEPRECATED inline int vector_length() const {
+    return impl_vector_length();
+  }
+#endif
+  inline int team_size() const { return m_team_size; }
+  inline int league_size() const { return m_league_size; }
+  inline size_t scratch_size(const int& level, int team_size_ = -1) const {
+    if (team_size_ < 0) team_size_ = m_team_size;
+    return m_team_scratch_size[level] +
+           team_size_ * m_thread_scratch_size[level];
+  }
+
+  inline Kokkos::Experimental::OpenMPTarget space() const {
+    return Kokkos::Experimental::OpenMPTarget();
+  }
+
+  template <class... OtherProperties>
+  TeamPolicyInternal(const TeamPolicyInternal<OtherProperties...>& p)
+      : m_league_size(p.m_league_size),
+        m_team_size(p.m_team_size),
+        m_vector_length(p.m_vector_length),
+        m_team_alloc(p.m_team_alloc),
+        m_team_iter(p.m_team_iter),
+        m_team_scratch_size(p.m_team_scratch_size),
+        m_thread_scratch_size(p.m_thread_scratch_size),
+        m_tune_team_size(p.m_tune_team_size),
+        m_tune_vector_length(p.m_tune_vector_length),
+        m_chunk_size(p.m_chunk_size) {}
+
+  /** \brief  Specify league size, request team size */
+  TeamPolicyInternal(const typename traits::execution_space&,
+                     int league_size_request, int team_size_request,
+                     int vector_length_request = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_tune_team_size(false),
+        m_tune_vector_length(false),
+        m_chunk_size(0) {
+    init(league_size_request, team_size_request, vector_length_request);
+  }
+
+  TeamPolicyInternal(const typename traits::execution_space&,
+                     int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */
+                     ,
+                     int vector_length_request = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_tune_team_size(true),
+        m_tune_vector_length(false),
+        m_chunk_size(0) {
+    init(league_size_request, default_team_size / vector_length_request,
+         vector_length_request);
+  }
+
+  TeamPolicyInternal(const typename traits::execution_space&,
+                     int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */
+                     ,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_tune_team_size(true),
+        m_tune_vector_length(true),
+        m_chunk_size(0) {
+    init(league_size_request, default_team_size, 1);
+  }
+  TeamPolicyInternal(const typename traits::execution_space&,
+                     int league_size_request, int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_tune_team_size(false),
+        m_tune_vector_length(true),
+        m_chunk_size(0) {
+    init(league_size_request, team_size_request, 1);
+  }
+
+  TeamPolicyInternal(int league_size_request, int team_size_request,
+                     int vector_length_request = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_tune_team_size(false),
+        m_tune_vector_length(false),
+        m_chunk_size(0) {
+    init(league_size_request, team_size_request, vector_length_request);
+  }
+
+  TeamPolicyInternal(int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */
+                     ,
+                     int vector_length_request = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_tune_team_size(true),
+        m_tune_vector_length(false),
+        m_chunk_size(0) {
+    init(league_size_request, default_team_size / vector_length_request,
+         vector_length_request);
+  }
+
+  TeamPolicyInternal(int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */
+                     ,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_tune_team_size(true),
+        m_tune_vector_length(true),
+        m_chunk_size(0) {
+    init(league_size_request, default_team_size, 1);
+  }
+  TeamPolicyInternal(int league_size_request, int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_tune_team_size(false),
+        m_tune_vector_length(true),
+        m_chunk_size(0) {
+    init(league_size_request, team_size_request, 1);
+  }
+  inline static size_t vector_length_max() {
+    return 32; /* TODO: this is bad. Need logic that is compiler and backend
+                  aware */
+  }
+  inline int team_alloc() const { return m_team_alloc; }
+  inline int team_iter() const { return m_team_iter; }
+
+  inline int chunk_size() const { return m_chunk_size; }
+
+  /** \brief set chunk_size to a discrete value*/
+  inline TeamPolicyInternal& set_chunk_size(
+      typename traits::index_type chunk_size_) {
+    m_chunk_size = chunk_size_;
+    return *this;
+  }
+
+  /** \brief set per team scratch size for a specific level of the scratch
+   * hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(const int& level,
+                                              const PerTeamValue& per_team) {
+    m_team_scratch_size[level] = per_team.value;
+    return *this;
+  }
+
+  /** \brief set per thread scratch size for a specific level of the scratch
+   * hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(
+      const int& level, const PerThreadValue& per_thread) {
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+  /** \brief set per thread and per team scratch size for a specific level of
+   * the scratch hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(
+      const int& level, const PerTeamValue& per_team,
+      const PerThreadValue& per_thread) {
+    m_team_scratch_size[level]   = per_team.value;
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+ private:
+  /** \brief finalize chunk_size if it was set to AUTO*/
+  inline void set_auto_chunk_size() {
+    int concurrency = 2048 * 128;
+
+    if (concurrency == 0) concurrency = 1;
+
+    if (m_chunk_size > 0) {
+      if (!Impl::is_integral_power_of_two(m_chunk_size))
+        Kokkos::abort("TeamPolicy blocking granularity must be power of two");
+    }
+
+    int new_chunk_size = 1;
+    while (new_chunk_size * 100 * concurrency < m_league_size)
+      new_chunk_size *= 2;
+    if (new_chunk_size < 128) {
+      new_chunk_size = 1;
+      while ((new_chunk_size * 40 * concurrency < m_league_size) &&
+             (new_chunk_size < 128))
+        new_chunk_size *= 2;
+    }
+    m_chunk_size = new_chunk_size;
+  }
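+  // Worked example (illustrative): with the hard-coded concurrency of
+  // 2048 * 128 = 262144 and m_league_size = 1e8, the first loop stops at
+  // new_chunk_size = 4; since 4 < 128 the fallback loop re-runs with the
+  // factor 40 and settles on m_chunk_size = 16.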
+
+ public:
+  using member_type = Impl::OpenMPTargetExecTeamMember;
+};
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    iType, Impl::OpenMPTargetExecTeamMember>
+TeamThreadRange(const Impl::OpenMPTargetExecTeamMember& thread,
+                const iType& count) {
+  return Impl::TeamThreadRangeBoundariesStruct<
+      iType, Impl::OpenMPTargetExecTeamMember>(thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::OpenMPTargetExecTeamMember>
+TeamThreadRange(const Impl::OpenMPTargetExecTeamMember& thread,
+                const iType1& begin, const iType2& end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::TeamThreadRangeBoundariesStruct<
+      iType, Impl::OpenMPTargetExecTeamMember>(thread, iType(begin),
+                                               iType(end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
+    iType, Impl::OpenMPTargetExecTeamMember>
+ThreadVectorRange(const Impl::OpenMPTargetExecTeamMember& thread,
+                  const iType& count) {
+  return Impl::ThreadVectorRangeBoundariesStruct<
+      iType, Impl::OpenMPTargetExecTeamMember>(thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::OpenMPTargetExecTeamMember>
+ThreadVectorRange(const Impl::OpenMPTargetExecTeamMember& thread,
+                  const iType1& arg_begin, const iType2& arg_end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::ThreadVectorRangeBoundariesStruct<
+      iType, Impl::OpenMPTargetExecTeamMember>(thread, iType(arg_begin),
+                                               iType(arg_end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION Impl::TeamVectorRangeBoundariesStruct<
+    iType, Impl::OpenMPTargetExecTeamMember>
+TeamVectorRange(const Impl::OpenMPTargetExecTeamMember& thread,
+                const iType& count) {
+  return Impl::TeamVectorRangeBoundariesStruct<
+      iType, Impl::OpenMPTargetExecTeamMember>(thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamVectorRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::OpenMPTargetExecTeamMember>
+TeamVectorRange(const Impl::OpenMPTargetExecTeamMember& thread,
+                const iType1& arg_begin, const iType2& arg_end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::TeamVectorRangeBoundariesStruct<
+      iType, Impl::OpenMPTargetExecTeamMember>(thread, iType(arg_begin),
+                                               iType(arg_end));
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::ThreadSingleStruct<Impl::OpenMPTargetExecTeamMember> PerTeam(
+    const Impl::OpenMPTargetExecTeamMember& thread) {
+  return Impl::ThreadSingleStruct<Impl::OpenMPTargetExecTeamMember>(thread);
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::VectorSingleStruct<Impl::OpenMPTargetExecTeamMember> PerThread(
+    const Impl::OpenMPTargetExecTeamMember& thread) {
+  return Impl::VectorSingleStruct<Impl::OpenMPTargetExecTeamMember>(thread);
+}
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+/** \brief  Inter-thread parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+    const Lambda& lambda) {
+#pragma omp for nowait schedule(static, 1)
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) lambda(i);
+}
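+// Usage sketch (illustrative only; `team`, `n` and `row` are placeholders):
+//
+//   Kokkos::parallel_for(Kokkos::TeamThreadRange(team, n),
+//                        [&](const int i) { row(i) = 2 * i; });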
+
+/** \brief  Inter-thread parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team
+ * and a summation of val is performed and put into result.
+ */
+
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+                    iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+                const Lambda& lambda, ValueType& result) {
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For the reduction we allocate 16 bytes
+  // per element in the scratch space, hence 16*32 = 512.
+  static_assert(sizeof(ValueType) <=
+                Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+  ValueType* TeamThread_scratch =
+      static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+#pragma omp barrier
+  TeamThread_scratch[0] = ValueType();
+#pragma omp barrier
+
+  if constexpr (std::is_arithmetic<ValueType>::value) {
+#pragma omp for reduction(+ : TeamThread_scratch[:1])
+    for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+      ValueType tmp = ValueType();
+      lambda(i, tmp);
+      TeamThread_scratch[0] += tmp;
+    }
+  } else {
+#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
+
+#pragma omp for reduction(custom : TeamThread_scratch[:1])
+    for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+      ValueType tmp = ValueType();
+      lambda(i, tmp);
+      TeamThread_scratch[0] += tmp;
+    }
+  }
+
+  result = TeamThread_scratch[0];
+}
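+// Usage sketch (illustrative only; `team`, `n` and `v` are placeholders):
+//
+//   double sum = 0.;
+//   Kokkos::parallel_reduce(
+//       Kokkos::TeamThreadRange(team, n),
+//       [&](const int i, double& lsum) { lsum += v(i); }, sum);
+//   // sum now holds the team-wide total on every thread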
+
+#if !defined(KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND)
+// This is the variant we actually wanted to write, but for some reason it
+// does not work with some compilers and crashes; re-test it with every new
+// compiler. The #else branch below carries the workaround.
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+                    iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+                const Lambda& lambda, ReducerType result) {
+  using ValueType = typename ReducerType::value_type;
+
+#pragma omp declare reduction(                                               \
+    custominner:ValueType                                                    \
+    : Impl::OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
+    initializer(                                                             \
+        Impl::OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
+
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For the reduction we allocate 16 bytes
+  // per element in the scratch space, hence 16*32 = 512.
+  static_assert(sizeof(ValueType) <=
+                Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+  ValueType* TeamThread_scratch =
+      static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+#pragma omp barrier
+  Impl::OpenMPTargetReducerWrapper<ReducerType>::init(TeamThread_scratch[0]);
+#pragma omp barrier
+
+#pragma omp for reduction(custominner : TeamThread_scratch[:1])
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+    lambda(i, TeamThread_scratch[0]);
+  }
+  result.reference() = TeamThread_scratch[0];
+}
+#else
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+                    iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+                const Lambda& lambda, ReducerType result) {
+  using ValueType = typename ReducerType::value_type;
+
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For the reduction we allocate 16 bytes
+  // per element in the scratch space, hence 16*32 = 512.
+  static_assert(sizeof(ValueType) <=
+                Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+  ValueType* TeamThread_scratch =
+      static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+#pragma omp declare reduction(                                               \
+    omp_red_teamthread_reducer:ValueType                                     \
+    : Impl::OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
+    initializer(                                                             \
+        Impl::OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
+
+#pragma omp barrier
+  ValueType tmp;
+  result.init(tmp);
+  TeamThread_scratch[0] = tmp;
+#pragma omp barrier
+
+  iType team_size = iType(omp_get_num_threads());
+#pragma omp for reduction(omp_red_teamthread_reducer \
+                          : TeamThread_scratch[:1]) schedule(static, 1)
+  for (iType t = 0; t < team_size; t++) {
+    ValueType tmp2;
+    result.init(tmp2);
+
+    for (iType i = loop_boundaries.start + t; i < loop_boundaries.end;
+         i += team_size) {
+      lambda(i, tmp2);
+    }
+
+    // FIXME_OPENMPTARGET: Join should work but doesn't. Every thread gets a
+    // private TeamThread_scratch[0], and at the end of the for-loop the
+    // `join` operation is performed by OpenMP itself, hence the simple
+    // assignment works.
+    //    result.join(TeamThread_scratch[0], tmp2);
+    TeamThread_scratch[0] = tmp2;
+  }
+
+  result.reference() = TeamThread_scratch[0];
+}
+#endif  // KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
+
+/** \brief  Inter-thread parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team
+ * and a reduction of val is performed using JoinType(ValueType& val, const
+ * ValueType& update) and put into init_result. The input value of init_result
+ * is used as initializer for temporary variables of ValueType. Therefore the
+ * input value should be the neutral element with respect to the join operation
+ * (e.g. '0 for +-' or '1 for *').
+ */
+template <typename iType, class Lambda, typename ValueType, class JoinType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+    const Lambda& lambda, const JoinType& join, ValueType& init_result) {
+  ValueType* TeamThread_scratch =
+      static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For the reduction we allocate 16 bytes
+  // per element in the scratch space, hence 16*32 = 512.
+  static_assert(sizeof(ValueType) <=
+                Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+  // FIXME_OPENMPTARGET: Still need to figure out how to get value_count here.
+  const int value_count = 1;
+
+#pragma omp barrier
+  TeamThread_scratch[0] = init_result;
+#pragma omp barrier
+
+#pragma omp for
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+    lambda(i, TeamThread_scratch[omp_get_thread_num() * value_count]);
+  }
+
+  // Reduce all partial results within a team.
+  const int team_size      = omp_get_num_threads();
+  int tree_neighbor_offset = 1;
+  do {
+#pragma omp for
+    for (int i = 0; i < team_size - tree_neighbor_offset;
+         i += 2 * tree_neighbor_offset) {
+      const int neighbor = i + tree_neighbor_offset;
+      join(lambda, &TeamThread_scratch[i * value_count],
+           &TeamThread_scratch[neighbor * value_count]);
+    }
+    tree_neighbor_offset *= 2;
+  } while (tree_neighbor_offset < team_size);
+  init_result = TeamThread_scratch[0];
+}
+
+// This is largely the same code as in HIP and CUDA except for the member name
+template <typename iType, class FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::OpenMPTargetExecTeamMember>& loop_bounds,
+    const FunctorType& lambda) {
+  using Analysis   = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+                                         TeamPolicy<Experimental::OpenMPTarget>,
+                                         FunctorType>;
+  using value_type = typename Analysis::value_type;
+
+  const auto start = loop_bounds.start;
+  const auto end   = loop_bounds.end;
+  //   Note this thing is called .member in the CUDA specialization of
+  //   TeamThreadRangeBoundariesStruct
+  auto& member         = loop_bounds.team;
+  const auto team_size = member.team_size();
+  const auto team_rank = member.team_rank();
+
+#if defined(KOKKOS_IMPL_TEAM_SCAN_WORKAROUND)
+  value_type scan_val = value_type();
+
+  if (team_rank == 0) {
+    for (iType i = start; i < end; ++i) {
+      lambda(i, scan_val, true);
+    }
+  }
+#pragma omp barrier
+#else
+  const auto nchunk = (end - start + team_size - 1) / team_size;
+  value_type accum  = 0;
+  // Each team has to process one or more chunks of the prefix scan.
+  for (iType i = 0; i < nchunk; ++i) {
+    auto ii = start + i * team_size + team_rank;
+    // local accumulation for this chunk
+    value_type local_accum = 0;
+    // user updates value with prefix value
+    if (ii < loop_bounds.end) lambda(ii, local_accum, false);
+    // perform team scan
+    local_accum = member.team_scan(local_accum);
+    // add this block's accum to the total accumulation
+    auto val = accum + local_accum;
+    // user updates their data with total accumulation
+    if (ii < loop_bounds.end) lambda(ii, val, true);
+    // the last value needs to be propagated to the next chunk
+    if (team_rank == team_size - 1) accum = val;
+    // broadcast last value to rest of the team
+    member.team_broadcast(accum, team_size - 1);
+  }
+#endif
+}
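+// Worked example (illustrative): with team_size = 4 scanning 8 elements
+// that are all 1, the loop above processes nchunk = 2 chunks: the first
+// chunk produces prefix values {0, 1, 2, 3}, the carry accum = 4 is
+// broadcast from the last rank, and the second chunk yields {4, 5, 6, 7}.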
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+/** \brief  Intra-thread vector parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling thread.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+    const Lambda& lambda) {
+#pragma omp simd
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) lambda(i);
+}
+
+/** \brief  Intra-thread vector parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling thread
+ * and a summation of val is performed and put into result.
+ */
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+    const Lambda& lambda, ValueType& result) {
+  ValueType vector_reduce = ValueType();
+
+  if constexpr (std::is_arithmetic<ValueType>::value) {
+#pragma omp simd reduction(+ : vector_reduce)
+    for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+      ValueType tmp = ValueType();
+      lambda(i, tmp);
+      vector_reduce += tmp;
+    }
+  } else {
+#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
+
+#pragma omp simd reduction(custom : vector_reduce)
+    for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+      lambda(i, vector_reduce);
+    }
+  }
+
+  result = vector_reduce;
+}
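+// Usage sketch (illustrative only; `team`, `m`, `n` and `A` are
+// placeholders): a ThreadVectorRange reduction typically nests inside a
+// TeamThreadRange:
+//
+//   Kokkos::parallel_for(
+//       Kokkos::TeamThreadRange(team, m), [&](const int i) {
+//         double row_sum = 0.;
+//         Kokkos::parallel_reduce(
+//             Kokkos::ThreadVectorRange(team, n),
+//             [&](const int j, double& s) { s += A(i, j); }, row_sum);
+//       });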
+
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
+                    iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+                const Lambda& lambda, ReducerType const& result) {
+  using ValueType = typename ReducerType::value_type;
+
+#pragma omp declare reduction(                                               \
+    custom:ValueType                                                         \
+    : Impl::OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
+    initializer(                                                             \
+        Impl::OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
+
+  ValueType vector_reduce;
+  Impl::OpenMPTargetReducerWrapper<ReducerType>::init(vector_reduce);
+
+#pragma omp simd reduction(custom : vector_reduce)
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+    lambda(i, vector_reduce);
+  }
+
+  result.reference() = vector_reduce;
+}
+
+/** \brief  Intra-thread vector parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling thread
+ * and a reduction of val is performed using JoinType(ValueType& val, const
+ * ValueType& update) and put into init_result. The input value of init_result
+ * is used as initializer for temporary variables of ValueType. Therefore the
+ * input value should be the neutral element with respect to the join operation
+ * (e.g. '0 for +-' or '1 for *').
+ */
+template <typename iType, class Lambda, typename ValueType, class JoinType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+    const Lambda& lambda, const JoinType& join, ValueType& init_result) {
+  ValueType result = init_result;
+
+  // FIXME_OPENMPTARGET think about omp simd
+  // join does not work with omp reduction clause
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+    ValueType tmp = ValueType();
+    lambda(i, tmp);
+    join(result, tmp);
+  }
+
+  init_result = result;
+}
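+// Usage sketch (illustrative only; `team`, `n` and `v` are placeholders): a
+// max-reduction expressed through the join argument, with init_result set
+// to the neutral element of max:
+//
+//   double mx = -DBL_MAX;
+//   Kokkos::parallel_reduce(
+//       Kokkos::ThreadVectorRange(team, n),
+//       [&](const int i, double& m) { m = m > v(i) ? m : v(i); },
+//       [](double& dst, const double& src) { dst = dst > src ? dst : src; },
+//       mx);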
+
+/** \brief  Intra-thread vector parallel exclusive prefix sum. Executes
+ * lambda(iType i, ValueType & val, bool final) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes in the thread and a scan
+ * operation is performed. Depending on the target execution space the operator
+ * might be called twice: once with final=false and once with final=true. When
+ * final==true val contains the prefix sum value. The contribution of this "i"
+ * needs to be added to val no matter whether final==true or not. In a serial
+ * execution (i.e. team_size==1) the operator is only called once with
+ * final==true. Scan_val will be set to the final sum value over all vector
+ * lanes.
+ */
+template <typename iType, class FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+    const FunctorType& lambda) {
+  using Analysis   = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+                                         TeamPolicy<Experimental::OpenMPTarget>,
+                                         FunctorType>;
+  using value_type = typename Analysis::value_type;
+
+  value_type scan_val = value_type();
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end; ++i) {
+    lambda(i, scan_val, true);
+  }
+}
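+// Functor sketch (illustrative only; `data` and `result` are placeholder
+// views): a prefix-sum operator matching the final==true/false protocol
+// described above:
+//
+//   [&](const int i, double& val, const bool final) {
+//     if (final) result(i) = val;  // exclusive prefix at i
+//     val += data(i);              // contribute i regardless of final
+//   }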
+
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_TEAM_SCAN_WORKAROUND
+#undef KOKKOS_IMPL_TEAM_SCAN_WORKAROUND
+#endif
+
+namespace Kokkos {
+/** \brief  Intra-team vector parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling team.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::TeamVectorRangeBoundariesStruct<
+        iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+    const Lambda& lambda) {
+#pragma omp for simd nowait schedule(static, 1)
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) lambda(i);
+}
+
+/** \brief  Intra-team vector parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling team
+ * and a summation of val is performed and put into result.
+ */
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::TeamVectorRangeBoundariesStruct<
+        iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+    const Lambda& lambda, ValueType& result) {
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For the reduction we allocate 16 bytes
+  // per element in the scratch space, hence 16*32 = 512.
+  static_assert(sizeof(ValueType) <=
+                Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+  ValueType* TeamVector_scratch =
+      static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+#pragma omp barrier
+  TeamVector_scratch[0] = ValueType();
+#pragma omp barrier
+
+  if constexpr (std::is_arithmetic<ValueType>::value) {
+#pragma omp for simd reduction(+ : TeamVector_scratch[:1])
+    for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+      ValueType tmp = ValueType();
+      lambda(i, tmp);
+      TeamVector_scratch[0] += tmp;
+    }
+  } else {
+#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
+
+#pragma omp for simd reduction(custom : TeamVector_scratch[:1])
+    for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+      ValueType tmp = ValueType();
+      lambda(i, tmp);
+      TeamVector_scratch[0] += tmp;
+    }
+  }
+
+  result = TeamVector_scratch[0];
+}
+
+#if !defined(KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND)
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
+                    iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+                const Lambda& lambda, ReducerType const& result) {
+  using ValueType = typename ReducerType::value_type;
+
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For the reduction we allocate 16 bytes
+  // per element in the scratch space, hence 16*32 = 512.
+  static_assert(sizeof(ValueType) <=
+                Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+#pragma omp declare reduction(                                               \
+    custom:ValueType                                                         \
+    : Impl::OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
+    initializer(                                                             \
+        Impl::OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
+
+  ValueType* TeamVector_scratch =
+      static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+#pragma omp barrier
+  Impl::OpenMPTargetReducerWrapper<ReducerType>::init(TeamVector_scratch[0]);
+#pragma omp barrier
+
+#pragma omp for simd reduction(custom : TeamVector_scratch[:1])
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end; i++) {
+    lambda(i, TeamVector_scratch[0]);
+  }
+
+  result.reference() = TeamVector_scratch[0];
+}
+#else
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
+                    iType, Impl::OpenMPTargetExecTeamMember>& loop_boundaries,
+                const Lambda& lambda, ReducerType const& result) {
+  using ValueType = typename ReducerType::value_type;
+
+  // FIXME_OPENMPTARGET - Make sure that for an array reduction the number of
+  // elements in the array is <= 32. For the reduction we allocate 16 bytes
+  // per element in the scratch space, hence 16*32 = 512.
+  static_assert(sizeof(ValueType) <=
+                Impl::OpenMPTargetExecTeamMember::TEAM_REDUCE_SIZE);
+
+  ValueType* TeamVector_scratch =
+      static_cast<ValueType*>(loop_boundaries.team.impl_reduce_scratch());
+
+#pragma omp declare reduction(                                               \
+    omp_red_teamthread_reducer:ValueType                                     \
+    : Impl::OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
+    initializer(                                                             \
+        Impl::OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
+
+#pragma omp barrier
+  ValueType tmp;
+  result.init(tmp);
+  TeamVector_scratch[0] = tmp;
+#pragma omp barrier
+
+  iType team_size = iType(omp_get_num_threads());
+#pragma omp for simd reduction(omp_red_teamthread_reducer \
+                               : TeamVector_scratch[:1]) schedule(static, 1)
+  for (iType t = 0; t < team_size; t++) {
+    ValueType tmp2;
+    result.init(tmp2);
+
+    for (iType i = loop_boundaries.start + t; i < loop_boundaries.end;
+         i += team_size) {
+      lambda(i, tmp2);
+    }
+    TeamVector_scratch[0] = tmp2;
+  }
+
+  result.reference() = TeamVector_scratch[0];
+}
+#endif  // KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
+}  // namespace Kokkos
+
+#ifdef KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
+#undef KOKKOS_IMPL_HIERARCHICAL_REDUCERS_WORKAROUND
+#endif
+
+namespace Kokkos {
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<Impl::OpenMPTargetExecTeamMember>&
+    /*single_struct*/,
+    const FunctorType& lambda) {
+  lambda();
+}
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::OpenMPTargetExecTeamMember>&
+        single_struct,
+    const FunctorType& lambda) {
+  if (single_struct.team_member.team_rank() == 0) lambda();
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<Impl::OpenMPTargetExecTeamMember>&
+    /*single_struct*/,
+    const FunctorType& lambda, ValueType& val) {
+  lambda(val);
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::OpenMPTargetExecTeamMember>&
+        single_struct,
+    const FunctorType& lambda, ValueType& val) {
+  if (single_struct.team_member.team_rank() == 0) {
+    lambda(val);
+  }
+  single_struct.team_member.team_broadcast(val, 0);
+}
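+// Usage sketch (illustrative only; `team` and `compute_once` are
+// placeholders): run a computation once per team and broadcast the result:
+//
+//   int flag;
+//   Kokkos::single(Kokkos::PerTeam(team),
+//                  [&](int& f) { f = compute_once(); }, flag);
+//   // flag is now valid on every thread of the team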
+}  // namespace Kokkos
+
+#endif /* #ifndef KOKKOS_OPENMPTARGETEXEC_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Instance.cpp b/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Instance.cpp
new file mode 100644 (file)
index 0000000..5192176
--- /dev/null
@@ -0,0 +1,230 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#if defined(KOKKOS_ENABLE_OPENMPTARGET) && defined(_OPENMP)
+
+// FIXME_OPENMPTARGET - macro for workaround implementation in UniqueToken
+// constructor. undef'ed at the end
+#define KOKKOS_IMPL_OPENMPTARGET_WORKAROUND
+
+#include <Kokkos_OpenMPTarget.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_UniqueToken.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Instance.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+#include <sstream>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+uint32_t OpenMPTargetInternal::impl_get_instance_id() const noexcept {
+  return m_instance_id;
+}
+
+void OpenMPTargetInternal::fence(openmp_fence_is_static is_static) {
+  fence(
+      "Kokkos::Experimental::Impl::OpenMPTargetInternal::fence: Unnamed "
+      "Internal Fence",
+      is_static);
+}
+void OpenMPTargetInternal::fence(const std::string& name,
+                                 openmp_fence_is_static is_static) {
+  if (is_static == openmp_fence_is_static::no) {
+    Kokkos::Tools::Experimental::Impl::profile_fence_event<
+        Kokkos::Experimental::OpenMPTarget>(
+        name,
+        Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{
+            impl_get_instance_id()},
+        [&]() {});
+  } else {
+    Kokkos::Tools::Experimental::Impl::profile_fence_event<
+        Kokkos::Experimental::OpenMPTarget>(
+        name,
+        Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+            GlobalDeviceSynchronization,
+        [&]() {});
+  }
+}
+int OpenMPTargetInternal::concurrency() { return 128000; }
+const char* OpenMPTargetInternal::name() { return "OpenMPTarget"; }
+void OpenMPTargetInternal::print_configuration(std::ostream& os,
+                                               bool /*verbose*/) const {
+  // FIXME_OPENMPTARGET
+  os << "Using OpenMPTarget\n";
+}
+
+void OpenMPTargetInternal::impl_finalize() {
+  m_is_initialized = false;
+  Kokkos::Impl::OpenMPTargetExec space;
+  if (space.m_lock_array != nullptr) space.clear_lock_array();
+
+  if (space.m_uniquetoken_ptr != nullptr)
+    Kokkos::kokkos_free<Kokkos::Experimental::OpenMPTargetSpace>(
+        space.m_uniquetoken_ptr);
+}
+void OpenMPTargetInternal::impl_initialize() {
+  m_is_initialized = true;
+
+  // FIXME_OPENMPTARGET: Fix the number of teams only for NVIDIA architectures
+  // from Pascal and upwards.
+#if defined(KOKKOS_ARCH_PASCAL) || defined(KOKKOS_ARCH_VOLTA) || \
+    defined(KOKKOS_ARCH_TURING75) || defined(KOKKOS_ARCH_AMPERE)
+#if defined(KOKKOS_COMPILER_CLANG) && (KOKKOS_COMPILER_CLANG >= 1300)
+  omp_set_num_teams(512);
+#endif
+#endif
+}
+int OpenMPTargetInternal::impl_is_initialized() {
+  return m_is_initialized ? 1 : 0;
+}
+
+OpenMPTargetInternal* OpenMPTargetInternal::impl_singleton() {
+  static OpenMPTargetInternal self;
+  return &self;
+}
+
+}  // Namespace Impl
+
+OpenMPTarget::OpenMPTarget()
+    : m_space_instance(Impl::OpenMPTargetInternal::impl_singleton()) {}
+
+const char* OpenMPTarget::name() {
+  return Impl::OpenMPTargetInternal::impl_singleton()->name();
+}
+void OpenMPTarget::print_configuration(std::ostream& os, bool verbose) const {
+  os << "OpenMPTarget Execution Space:\n";
+  os << "  KOKKOS_ENABLE_OPENMPTARGET: yes\n";
+
+  os << "\nOpenMPTarget Runtime Configuration:\n";
+
+  m_space_instance->print_configuration(os, verbose);
+}
+
+uint32_t OpenMPTarget::impl_instance_id() const noexcept {
+  return m_space_instance->impl_get_instance_id();
+}
+
+int OpenMPTarget::concurrency() {
+  return Impl::OpenMPTargetInternal::impl_singleton()->concurrency();
+}
+
+void OpenMPTarget::fence(const std::string& name) {
+  Impl::OpenMPTargetInternal::impl_singleton()->fence(name);
+}
+
+void OpenMPTarget::impl_static_fence(const std::string& name) {
+  Impl::OpenMPTargetInternal::impl_singleton()->fence(
+      name, Kokkos::Experimental::Impl::openmp_fence_is_static::yes);
+}
+
+void OpenMPTarget::impl_initialize(InitializationSettings const&) {
+  Impl::OpenMPTargetInternal::impl_singleton()->impl_initialize();
+}
+void OpenMPTarget::impl_finalize() {
+  Impl::OpenMPTargetInternal::impl_singleton()->impl_finalize();
+}
+int OpenMPTarget::impl_is_initialized() {
+  return Impl::OpenMPTargetInternal::impl_singleton()->impl_is_initialized();
+}
+}  // Namespace Experimental
+
+namespace Impl {
+int g_openmptarget_space_factory_initialized =
+    Kokkos::Impl::initialize_space_factory<Experimental::OpenMPTarget>(
+        "160_OpenMPTarget");
+
+}  // namespace Impl
+}  // Namespace Kokkos
+
+namespace Kokkos {
+namespace Experimental {
+
+UniqueToken<Kokkos::Experimental::OpenMPTarget,
+            Kokkos::Experimental::UniqueTokenScope::Global>::
+    UniqueToken(Kokkos::Experimental::OpenMPTarget const&) {
+#ifdef KOKKOS_IMPL_OPENMPTARGET_WORKAROUND
+  uint32_t* ptr = Kokkos::Impl::OpenMPTargetExec::m_uniquetoken_ptr;
+  int count     = Kokkos::Experimental::OpenMPTarget().concurrency();
+  if (ptr == nullptr) {
+    int size = count * sizeof(uint32_t);
+    ptr      = static_cast<uint32_t*>(
+        Kokkos::kokkos_malloc<Kokkos::Experimental::OpenMPTargetSpace>(
+            "Kokkos::OpenMPTarget::m_uniquetoken_ptr", size));
+    std::vector<uint32_t> h_buf(count, 0);
+    OMPT_SAFE_CALL(omp_target_memcpy(ptr, h_buf.data(), size, 0, 0,
+                                     omp_get_default_device(),
+                                     omp_get_initial_device()));
+
+    Kokkos::Impl::OpenMPTargetExec::m_uniquetoken_ptr = ptr;
+  }
+#else
+// FIXME_OPENMPTARGET - Two versions of non-working implementations to fill
+// `ptr` with 0's.
+// Version 1 - Creating a target region and filling the pointer.
+// Error: CUDA error: named symbol not found.
+#pragma omp target teams distribute parallel for is_device_ptr(ptr) \
+    map(to                                                          \
+        : size)
+  for (int i = 0; i < count; ++i) ptr[i] = 0;
+
+  // Version 2 : Allocating a view on the device and filling it with a scalar
+  // value of 0.
+  Kokkos::View<uint32_t*, Kokkos::Experimental::OpenMPTargetSpace> ptr_view(
+      ptr, count);
+  Kokkos::deep_copy(ptr_view, 0);
+#endif
+  m_buffer = ptr;
+  m_count  = count;
+}
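+// Usage sketch (illustrative only): acquire a unique index inside a
+// parallel region and release it when done.
+//
+//   Kokkos::Experimental::UniqueToken<
+//       Kokkos::Experimental::OpenMPTarget,
+//       Kokkos::Experimental::UniqueTokenScope::Global>
+//       token{Kokkos::Experimental::OpenMPTarget()};
+//   const int idx = token.acquire();  // unique within [0, token.size())
+//   // ... index per-token scratch with idx ...
+//   token.release(idx);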
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#undef KOKKOS_IMPL_OPENMPTARGET_WORKAROUND
+#endif  // defined(KOKKOS_ENABLE_OPENMPTARGET) && defined(_OPENMP)
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Instance.hpp b/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Instance.hpp
new file mode 100644 (file)
index 0000000..8e4baf8
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMPTARGET_INSTANCE_HPP
+#define KOKKOS_OPENMPTARGET_INSTANCE_HPP
+
+#include <Kokkos_Core.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+enum class openmp_fence_is_static { yes, no };
+
+class OpenMPTargetInternal {
+ private:
+  OpenMPTargetInternal()                            = default;
+  OpenMPTargetInternal(const OpenMPTargetInternal&) = default;
+  OpenMPTargetInternal& operator=(const OpenMPTargetInternal&) = default;
+
+ public:
+  void fence(openmp_fence_is_static is_static = openmp_fence_is_static::no);
+  void fence(const std::string& name,
+             openmp_fence_is_static is_static = openmp_fence_is_static::no);
+
+  /** \brief  Return the maximum amount of concurrency.  */
+  int concurrency();
+
+  //! Print configuration information to the given output stream.
+  void print_configuration(std::ostream& os, bool verbose) const;
+
+  static const char* name();
+
+  //! Free any resources being consumed by the device.
+  void impl_finalize();
+
+  //! Has been initialized
+  int impl_is_initialized();
+  uint32_t impl_get_instance_id() const noexcept;
+  //! Initialize, telling the CUDA run-time library which device to use.
+  void impl_initialize();
+
+  static OpenMPTargetInternal* impl_singleton();
+
+ private:
+  bool m_is_initialized  = false;
+  uint32_t m_instance_id = Kokkos::Tools::Experimental::Impl::idForInstance<
+      Kokkos::Experimental::OpenMPTarget>(reinterpret_cast<uintptr_t>(this));
+};
+}  // Namespace Impl
+}  // Namespace Experimental
+}  // Namespace Kokkos
+
+#endif  // KOKKOS_OPENMPTARGET_INSTANCE_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Parallel.hpp b/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Parallel.hpp
new file mode 100644 (file)
index 0000000..dfb9ea7
--- /dev/null
@@ -0,0 +1,1337 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMPTARGET_PARALLEL_HPP
+#define KOKKOS_OPENMPTARGET_PARALLEL_HPP
+
+#include <omp.h>
+#include <cstring>
+#include <sstream>
+#include <Kokkos_Parallel.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Exec.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
+                  Kokkos::Experimental::OpenMPTarget> {
+ private:
+  using Policy    = Kokkos::RangePolicy<Traits...>;
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+ public:
+  void execute() const { execute_impl<WorkTag>(); }
+
+  template <class TagType>
+  void execute_impl() const {
+    OpenMPTargetExec::verify_is_process(
+        "Kokkos::Experimental::OpenMPTarget parallel_for");
+    OpenMPTargetExec::verify_initialized(
+        "Kokkos::Experimental::OpenMPTarget parallel_for");
+    const auto begin = m_policy.begin();
+    const auto end   = m_policy.end();
+
+    if (end <= begin) return;
+
+    FunctorType a_functor(m_functor);
+
+#pragma omp target teams distribute parallel for map(to : a_functor)
+    for (auto i = begin; i < end; ++i) {
+      if constexpr (std::is_void<TagType>::value) {
+        a_functor(i);
+      } else {
+        a_functor(TagType(), i);
+      }
+    }
+  }
+
+  ParallelFor(const FunctorType& arg_functor, Policy arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+};
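+
+// A minimal usage sketch (the names `n`, `x`, `y`, and `a` are illustrative
+// only): a call such as
+//
+//   Kokkos::parallel_for(
+//       "axpy",
+//       Kokkos::RangePolicy<Kokkos::Experimental::OpenMPTarget>(0, n),
+//       KOKKOS_LAMBDA(const int i) { y[i] += a * x[i]; });
+//
+// is dispatched to the specialization above when OpenMPTarget is the
+// chosen execution space.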
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+// This class has the memcpy routine that is commonly used by ParallelReduce
+// over RangePolicy and TeamPolicy.
+template <class PointerType>
+struct ParallelReduceCommon {
+  // Copy the result back to device if the view is on the device.
+  static void memcpy_result(PointerType dest, PointerType src, size_t size,
+                            bool ptr_on_device) {
+    if (ptr_on_device) {
+      OMPT_SAFE_CALL(omp_target_memcpy(dest, src, size, 0, 0,
+                                       omp_get_default_device(),
+                                       omp_get_initial_device()));
+    } else {
+      // Host-side result: copy the full `size` bytes, which may span an
+      // array of reduction values.
+      memcpy(dest, src, size);
+    }
+  }
+};
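+
+// For example (sketch; `res`, `host_val`, and `ptr_on_device` are
+// illustrative names): with a host-side result the call below reduces to a
+// plain memcpy, while with a device-side result it becomes a host-to-device
+// omp_target_memcpy.
+//
+//   ParallelReduceCommon<double*>::memcpy_result(res, &host_val,
+//                                                sizeof(double),
+//                                                ptr_on_device);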
+
+template <class FunctorType, class PolicyType, class ReducerType,
+          class PointerType, class ValueType>
+struct ParallelReduceSpecialize {
+  inline static void execute(const FunctorType& /*f*/, const PolicyType& /*p*/,
+                             PointerType /*result_ptr*/) {
+    constexpr int FunctorHasJoin =
+        Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
+                              FunctorType>::has_join_member_function;
+    constexpr int UseReducerType = is_reducer<ReducerType>::value;
+
+    std::stringstream error_message;
+    error_message << "Error: Invalid Specialization " << FunctorHasJoin << ' '
+                  << UseReducerType << '\n';
+    // FIXME_OPENMPTARGET
+    OpenMPTarget_abort(error_message.str().c_str());
+  }
+};
+
+template <class FunctorType, class ReducerType, class PointerType,
+          class ValueType, class... PolicyArgs>
+struct ParallelReduceSpecialize<FunctorType, Kokkos::RangePolicy<PolicyArgs...>,
+                                ReducerType, PointerType, ValueType> {
+  using PolicyType = Kokkos::RangePolicy<PolicyArgs...>;
+  using TagType    = typename PolicyType::work_tag;
+  using ReducerTypeFwd =
+      std::conditional_t<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                                         PolicyType, ReducerTypeFwd>;
+  using ReferenceType = typename Analysis::reference_type;
+
+  using ParReduceCommon = ParallelReduceCommon<PointerType>;
+
+  static void execute_reducer(const FunctorType& f, const PolicyType& p,
+                              PointerType result_ptr, bool ptr_on_device) {
+    OpenMPTargetExec::verify_is_process(
+        "Kokkos::Experimental::OpenMPTarget parallel_reduce");
+    OpenMPTargetExec::verify_initialized(
+        "Kokkos::Experimental::OpenMPTarget parallel_reduce");
+    const auto begin = p.begin();
+    const auto end   = p.end();
+
+    ValueType result;
+    OpenMPTargetReducerWrapper<ReducerType>::init(result);
+
+    // Initialize and copy back the result even if it is a zero length
+    // reduction.
+    if (end <= begin) {
+      ParReduceCommon::memcpy_result(result_ptr, &result, sizeof(ValueType),
+                                     ptr_on_device);
+      return;
+    }
+
+#pragma omp declare reduction(                                         \
+    custom:ValueType                                                   \
+    : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
+    initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for map(to                    \
+                                                     : f) reduction(custom \
+                                                                    : result)
+    for (auto i = begin; i < end; ++i) {
+      if constexpr (std::is_void<TagType>::value) {
+        f(i, result);
+      } else {
+        f(TagType(), i, result);
+      }
+    }
+
+    ParReduceCommon::memcpy_result(result_ptr, &result, sizeof(ValueType),
+                                   ptr_on_device);
+  }
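+
+  // A usage sketch for this path (illustrative names only): a reduction
+  // that passes an explicit reducer object, such as
+  //
+  //   double result;
+  //   Kokkos::parallel_reduce(
+  //       Kokkos::RangePolicy<Kokkos::Experimental::OpenMPTarget>(0, n),
+  //       KOKKOS_LAMBDA(const int i, double& lmax) {
+  //         if (v[i] > lmax) lmax = v[i];
+  //       },
+  //       Kokkos::Max<double>(result));
+  //
+  // maps the reducer's init/join onto the `custom` OpenMP declare
+  // reduction above.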
+
+  template <class TagType, int NumReductions>
+  static void execute_array(const FunctorType& f, const PolicyType& p,
+                            PointerType result_ptr, bool ptr_on_device) {
+    OpenMPTargetExec::verify_is_process(
+        "Kokkos::Experimental::OpenMPTarget parallel_reduce");
+    OpenMPTargetExec::verify_initialized(
+        "Kokkos::Experimental::OpenMPTarget parallel_reduce");
+    const auto begin = p.begin();
+    const auto end   = p.end();
+
+    // Taken if the reduction is on a scalar type.
+    if constexpr (NumReductions == 1) {
+      ValueType result = ValueType();
+
+      // Initialize and copy back the result even if it is a zero length
+      // reduction.
+      if (end <= begin) {
+        ParReduceCommon::memcpy_result(result_ptr, &result, sizeof(ValueType),
+                                       ptr_on_device);
+        return;
+      }
+      // Case where reduction is on a native data type.
+      if constexpr (std::is_arithmetic<ValueType>::value) {
+#pragma omp target teams distribute parallel for \
+         map(to:f) reduction(+: result)
+        for (auto i = begin; i < end; ++i)
+
+          if constexpr (std::is_void<TagType>::value) {
+            f(i, result);
+          } else {
+            f(TagType(), i, result);
+          }
+      } else {
+#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
+#pragma omp target teams distribute parallel for map(to                    \
+                                                     : f) reduction(custom \
+                                                                    : result)
+        for (auto i = begin; i < end; ++i)
+
+          if constexpr (std::is_void<TagType>::value) {
+            f(i, result);
+          } else {
+            f(TagType(), i, result);
+          }
+      }
+
+      ParReduceCommon::memcpy_result(result_ptr, &result, sizeof(ValueType),
+                                     ptr_on_device);
+    } else {
+      ValueType result[NumReductions] = {};
+
+      // Initialize and copy back the result even if it is a zero length
+      // reduction.
+      if (end <= begin) {
+        ParReduceCommon::memcpy_result(result_ptr, result,
+                                       NumReductions * sizeof(ValueType),
+                                       ptr_on_device);
+        return;
+      }
+#pragma omp target teams distribute parallel for map(to:f) reduction(+:result[:NumReductions])
+      for (auto i = begin; i < end; ++i) {
+        if constexpr (std::is_void<TagType>::value) {
+          f(i, result);
+        } else {
+          f(TagType(), i, result);
+        }
+      }
+
+      ParReduceCommon::memcpy_result(
+          result_ptr, result, NumReductions * sizeof(ValueType), ptr_on_device);
+    }
+  }
+
+  static void execute_init_join(const FunctorType& f, const PolicyType& p,
+                                PointerType ptr, const bool ptr_on_device) {
+    const auto begin = p.begin();
+    const auto end   = p.end();
+
+    using FunctorAnalysis =
+        Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
+                              FunctorType>;
+    constexpr int HasInit = FunctorAnalysis::has_init_member_function;
+
+    // Initialize the result pointer.
+
+    const auto size = end - begin;
+
+    // FIXME_OPENMPTARGET: The team size and MAX_ACTIVE_THREADS are currently
+    // based on NVIDIA-V100 and should be modified to be based on the
+    // architecture in the future.
+    const int max_team_threads = 32;
+    const int max_teams =
+        OpenMPTargetExec::MAX_ACTIVE_THREADS / max_team_threads;
+    // Number of elements in the reduction
+    const auto value_count = FunctorAnalysis::value_count(f);
+
+    // Allocate scratch per active thread, achieved by passing 1 as the
+    // first parameter of `resize_scratch`.
+    OpenMPTargetExec::resize_scratch(1, 0, value_count * sizeof(ValueType),
+                                     std::numeric_limits<int64_t>::max());
+    ValueType* scratch_ptr =
+        static_cast<ValueType*>(OpenMPTargetExec::get_scratch_ptr());
+
+#pragma omp target map(to : f) is_device_ptr(scratch_ptr)
+    {
+      typename FunctorAnalysis::Reducer final_reducer(&f);
+      // Taken if the functor provides an `init` member function.
+      if constexpr (HasInit) {
+        // The `init` routine needs to be called on the device since it might
+        // need device members.
+        final_reducer.init(scratch_ptr);
+        final_reducer.final(scratch_ptr);
+      } else {
+        for (int i = 0; i < value_count; ++i) {
+          static_cast<ValueType*>(scratch_ptr)[i] = ValueType();
+        }
+
+        final_reducer.final(scratch_ptr);
+      }
+    }
+
+    if (end <= begin) {
+      // If there is no work to be done, copy back the initialized values and
+      // exit.
+      if (!ptr_on_device)
+        OMPT_SAFE_CALL(omp_target_memcpy(
+            ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+            omp_get_initial_device(), omp_get_default_device()));
+      else
+        OMPT_SAFE_CALL(omp_target_memcpy(
+            ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+            omp_get_default_device(), omp_get_default_device()));
+
+      return;
+    }
+
+#pragma omp target teams num_teams(max_teams) thread_limit(max_team_threads) \
+    map(to                                                                   \
+        : f) is_device_ptr(scratch_ptr)
+    {
+      typename FunctorAnalysis::Reducer final_reducer(&f);
+#pragma omp parallel
+      {
+        const int team_num    = omp_get_team_num();
+        const int num_teams   = omp_get_num_teams();
+        const auto chunk_size = size / num_teams;
+        const auto team_begin = begin + team_num * chunk_size;
+        const auto team_end =
+            (team_num == num_teams - 1) ? end : (team_begin + chunk_size);
+        ValueType* team_scratch =
+            scratch_ptr + team_num * max_team_threads * value_count;
+        ReferenceType result = final_reducer.init(
+            &team_scratch[omp_get_thread_num() * value_count]);
+
+        // Accumulate partial results in thread specific storage.
+#pragma omp for simd
+        for (auto i = team_begin; i < team_end; ++i) {
+          if constexpr (std::is_void<TagType>::value) {
+            f(i, result);
+          } else {
+            f(TagType(), i, result);
+          }
+        }
+
+        // Reduce all partial results within a team.
+        const int team_size      = max_team_threads;
+        int tree_neighbor_offset = 1;
+        do {
+#pragma omp for simd
+          for (int i = 0; i < team_size - tree_neighbor_offset;
+               i += 2 * tree_neighbor_offset) {
+            const int neighbor = i + tree_neighbor_offset;
+            final_reducer.join(&team_scratch[i * value_count],
+                               &team_scratch[neighbor * value_count]);
+          }
+          tree_neighbor_offset *= 2;
+        } while (tree_neighbor_offset < team_size);
+      }  // end parallel
+    }    // end target
+
+    int tree_neighbor_offset = 1;
+    do {
+#pragma omp target teams distribute parallel for simd map(to   \
+                                                          : f) \
+    is_device_ptr(scratch_ptr)
+      for (int i = 0; i < max_teams - tree_neighbor_offset;
+           i += 2 * tree_neighbor_offset) {
+        typename FunctorAnalysis::Reducer final_reducer(&f);
+        ValueType* team_scratch = scratch_ptr;
+        const int team_offset   = max_team_threads * value_count;
+        final_reducer.join(
+            &team_scratch[i * team_offset],
+            &team_scratch[(i + tree_neighbor_offset) * team_offset]);
+
+        // If `final` is provided by the functor, apply it only once, at
+        // the very end.
+        if (tree_neighbor_offset * 2 >= max_teams && omp_get_team_num() == 0 &&
+            omp_get_thread_num() == 0) {
+          final_reducer.final(scratch_ptr);
+        }
+      }
+      tree_neighbor_offset *= 2;
+    } while (tree_neighbor_offset < max_teams);
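+    // Worked example of the tree reduction above, assuming max_teams = 8:
+    // offset 1 joins slots (0,1) (2,3) (4,5) (6,7); offset 2 joins (0,2)
+    // and (4,6); offset 4 joins (0,4). After three rounds slot 0 holds the
+    // reduction over all team results, and `final` has been applied
+    // exactly once.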
+
+    // If the result view is on the host, copy back the values via memcpy.
+    if (!ptr_on_device)
+      OMPT_SAFE_CALL(omp_target_memcpy(
+          ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+          omp_get_initial_device(), omp_get_default_device()));
+    else
+      OMPT_SAFE_CALL(omp_target_memcpy(
+          ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+          omp_get_default_device(), omp_get_default_device()));
+  }
+};
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
+                     Kokkos::Experimental::OpenMPTarget> {
+ private:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+
+  using ReducerTypeFwd =
+      std::conditional_t<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                                         Policy, ReducerTypeFwd>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  static constexpr int HasJoin =
+      Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, Policy,
+                            FunctorType>::has_join_member_function;
+  static constexpr int UseReducer = is_reducer<ReducerType>::value;
+  static constexpr int IsArray    = std::is_pointer<reference_type>::value;
+
+  using ParReduceSpecialize =
+      ParallelReduceSpecialize<FunctorType, Policy, ReducerType, pointer_type,
+                               typename Analysis::value_type>;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  bool m_result_ptr_on_device;
+  const int m_result_ptr_num_elems;
+  using TagType = typename Policy::work_tag;
+
+ public:
+  void execute() const {
+    if constexpr (HasJoin) {
+      // Taken if the functor provides init/join member functions.
+      ParReduceSpecialize::execute_init_join(m_functor, m_policy, m_result_ptr,
+                                             m_result_ptr_on_device);
+    } else if constexpr (UseReducer) {
+      // Taken if an explicit reducer type is used.
+      ParReduceSpecialize::execute_reducer(m_functor, m_policy, m_result_ptr,
+                                           m_result_ptr_on_device);
+    } else if constexpr (IsArray) {
+      // Taken if the reduction is on an array; the routine is templated
+      // over the size of the array.
+      if (m_result_ptr_num_elems <= 2) {
+        ParReduceSpecialize::template execute_array<TagType, 2>(
+            m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+      } else if (m_result_ptr_num_elems <= 4) {
+        ParReduceSpecialize::template execute_array<TagType, 4>(
+            m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+      } else if (m_result_ptr_num_elems <= 8) {
+        ParReduceSpecialize::template execute_array<TagType, 8>(
+            m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+      } else if (m_result_ptr_num_elems <= 16) {
+        ParReduceSpecialize::template execute_array<TagType, 16>(
+            m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+      } else if (m_result_ptr_num_elems <= 32) {
+        ParReduceSpecialize::template execute_array<TagType, 32>(
+            m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+      } else {
+        Kokkos::abort("array reduction length must be <= 32");
+      }
+    } else {
+      // This branch handles the basic scalar reduction.
+      ParReduceSpecialize::template execute_array<TagType, 1>(
+          m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+    }
+  }
+
+  template <class ViewType>
+  ParallelReduce(const FunctorType& arg_functor, Policy& arg_policy,
+                 const ViewType& arg_result_view,
+                 std::enable_if_t<Kokkos::is_view<ViewType>::value &&
+                                      !Kokkos::is_reducer<ReducerType>::value,
+                                  void*> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result_view.data()),
+        m_result_ptr_on_device(
+            MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_result_ptr_num_elems(arg_result_view.size()) {}
+
+  ParallelReduce(const FunctorType& arg_functor, Policy& arg_policy,
+                 const ReducerType& reducer)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_result_ptr_on_device(
+            MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_result_ptr_num_elems(reducer.view().size()) {}
+};
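+
+// Dispatch sketch (illustrative names only): a plain scalar reduction like
+//
+//   double sum = 0;
+//   Kokkos::parallel_reduce(
+//       Kokkos::RangePolicy<Kokkos::Experimental::OpenMPTarget>(0, n),
+//       KOKKOS_LAMBDA(const int i, double& lsum) { lsum += v[i]; }, sum);
+//
+// takes the execute_array<TagType, 1> path; an explicit reducer object
+// takes execute_reducer; a functor with its own init/join member functions
+// takes execute_init_join; and array-valued reductions (pointer
+// reference_type) go to execute_array<TagType, N> with N rounded up to the
+// next supported size (2, 4, 8, 16, or 32).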
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+                   Kokkos::Experimental::OpenMPTarget> {
+ protected:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+  using idx_type  = typename Policy::index_type;
+
+  using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+                                         Policy, FunctorType>;
+
+  using value_type     = typename Analysis::value_type;
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  template <class TagType>
+  std::enable_if_t<std::is_void<TagType>::value> call_with_tag(
+      const FunctorType& f, const idx_type& idx, value_type& val,
+      const bool& is_final) const {
+    f(idx, val, is_final);
+  }
+  template <class TagType>
+  std::enable_if_t<!std::is_void<TagType>::value> call_with_tag(
+      const FunctorType& f, const idx_type& idx, value_type& val,
+      const bool& is_final) const {
+    f(WorkTag(), idx, val, is_final);
+  }
+
+ public:
+  void impl_execute(
+      Kokkos::View<value_type**, Kokkos::LayoutRight,
+                   Kokkos::Experimental::OpenMPTargetSpace>
+          element_values,
+      Kokkos::View<value_type*, Kokkos::Experimental::OpenMPTargetSpace>
+          chunk_values,
+      Kokkos::View<int64_t, Kokkos::Experimental::OpenMPTargetSpace> count)
+      const {
+    const idx_type N          = m_policy.end() - m_policy.begin();
+    const idx_type chunk_size = 128;
+    const idx_type n_chunks   = (N + chunk_size - 1) / chunk_size;
+    idx_type nteams           = n_chunks > 512 ? 512 : n_chunks;
+    idx_type team_size        = 128;
+
+    FunctorType a_functor(m_functor);
+#pragma omp target teams distribute map(to                             \
+                                        : a_functor) num_teams(nteams) \
+    thread_limit(team_size)
+    for (idx_type team_id = 0; team_id < n_chunks; ++team_id) {
+      typename Analysis::Reducer final_reducer(&a_functor);
+#pragma omp parallel num_threads(team_size)
+      {
+        const idx_type local_offset = team_id * chunk_size;
+
+#pragma omp for
+        for (idx_type i = 0; i < chunk_size; ++i) {
+          const idx_type idx = local_offset + i;
+          value_type val;
+          final_reducer.init(&val);
+          if (idx < N) call_with_tag<WorkTag>(a_functor, idx, val, false);
+          element_values(team_id, i) = val;
+        }
+#pragma omp barrier
+        if (omp_get_thread_num() == 0) {
+          value_type sum;
+          final_reducer.init(&sum);
+          for (idx_type i = 0; i < chunk_size; ++i) {
+            final_reducer.join(&sum, &element_values(team_id, i));
+            element_values(team_id, i) = sum;
+          }
+          chunk_values(team_id) = sum;
+        }
+#pragma omp barrier
+        if (omp_get_thread_num() == 0) {
+          if (Kokkos::atomic_fetch_add(&count(), 1) == n_chunks - 1) {
+            value_type sum;
+            final_reducer.init(&sum);
+            for (idx_type i = 0; i < n_chunks; ++i) {
+              final_reducer.join(&sum, &chunk_values(i));
+              chunk_values(i) = sum;
+            }
+          }
+        }
+      }
+    }
+
+#pragma omp target teams distribute map(to                             \
+                                        : a_functor) num_teams(nteams) \
+    thread_limit(team_size)
+    for (idx_type team_id = 0; team_id < n_chunks; ++team_id) {
+      typename Analysis::Reducer final_reducer(&a_functor);
+#pragma omp parallel num_threads(team_size)
+      {
+        const idx_type local_offset = team_id * chunk_size;
+        value_type offset_value;
+        if (team_id > 0)
+          offset_value = chunk_values(team_id - 1);
+        else
+          final_reducer.init(&offset_value);
+
+#pragma omp for
+        for (idx_type i = 0; i < chunk_size; ++i) {
+          const idx_type idx = local_offset + i;
+          value_type local_offset_value;
+          if (i > 0) {
+            local_offset_value = element_values(team_id, i - 1);
+            // FIXME_OPENMPTARGET We seem to access memory illegally on AMD
+            // GPUs
+#ifdef KOKKOS_ARCH_VEGA
+            if constexpr (Analysis::has_join_member_function) {
+              if constexpr (std::is_void_v<WorkTag>)
+                a_functor.join(local_offset_value, offset_value);
+              else
+                a_functor.join(WorkTag{}, local_offset_value, offset_value);
+            } else
+              local_offset_value += offset_value;
+#else
+            final_reducer.join(&local_offset_value, &offset_value);
+#endif
+          } else
+            local_offset_value = offset_value;
+          if (idx < N)
+            call_with_tag<WorkTag>(a_functor, idx, local_offset_value, true);
+        }
+      }
+    }
+  }
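+
+  // Worked example of the chunked scan above, assuming chunk_size = 2 and
+  // a sum scan over {1,2,3,4,5,6}: the first pass produces the per-chunk
+  // inclusive scans element_values = {1,3},{3,7},{5,11} and chunk totals
+  // chunk_values = {3,7,11}; the last team to arrive scans the totals into
+  // {3,10,21}; the second pass then hands each element its global
+  // exclusive prefix {0,1,3,6,10,15}, from which the functor produces the
+  // inclusive result {1,3,6,10,15,21} when is_final is true.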
+
+  void execute() const {
+    OpenMPTargetExec::verify_is_process(
+        "Kokkos::Experimental::OpenMPTarget parallel_scan");
+    OpenMPTargetExec::verify_initialized(
+        "Kokkos::Experimental::OpenMPTarget parallel_scan");
+    const idx_type N          = m_policy.end() - m_policy.begin();
+    const idx_type chunk_size = 128;
+    const idx_type n_chunks   = (N + chunk_size - 1) / chunk_size;
+
+    // This could be scratch memory per team
+    Kokkos::View<value_type**, Kokkos::LayoutRight,
+                 Kokkos::Experimental::OpenMPTargetSpace>
+        element_values("element_values", n_chunks, chunk_size);
+    Kokkos::View<value_type*, Kokkos::Experimental::OpenMPTargetSpace>
+        chunk_values("chunk_values", n_chunks);
+    Kokkos::View<int64_t, Kokkos::Experimental::OpenMPTargetSpace> count(
+        "Count");
+
+    impl_execute(element_values, chunk_values, count);
+  }
+
+  //----------------------------------------
+
+  ParallelScan(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+
+  //----------------------------------------
+};
+
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+                            ReturnType, Kokkos::Experimental::OpenMPTarget>
+    : public ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+                          Kokkos::Experimental::OpenMPTarget> {
+  using base_t     = ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+                              Kokkos::Experimental::OpenMPTarget>;
+  using value_type = typename base_t::value_type;
+  value_type& m_returnvalue;
+
+ public:
+  void execute() const {
+    OpenMPTargetExec::verify_is_process(
+        "Kokkos::Experimental::OpenMPTarget parallel_scan");
+    OpenMPTargetExec::verify_initialized(
+        "Kokkos::Experimental::OpenMPTarget parallel_scan");
+    const int64_t N        = base_t::m_policy.end() - base_t::m_policy.begin();
+    const int chunk_size   = 128;
+    const int64_t n_chunks = (N + chunk_size - 1) / chunk_size;
+
+    if (N > 0) {
+      // This could be scratch memory per team
+      Kokkos::View<value_type**, Kokkos::LayoutRight,
+                   Kokkos::Experimental::OpenMPTargetSpace>
+          element_values("element_values", n_chunks, chunk_size);
+      Kokkos::View<value_type*, Kokkos::Experimental::OpenMPTargetSpace>
+          chunk_values("chunk_values", n_chunks);
+      Kokkos::View<int64_t, Kokkos::Experimental::OpenMPTargetSpace> count(
+          "Count");
+
+      base_t::impl_execute(element_values, chunk_values, count);
+
+      const int size = base_t::Analysis::value_size(base_t::m_functor);
+      DeepCopy<HostSpace, Kokkos::Experimental::OpenMPTargetSpace>(
+          &m_returnvalue, chunk_values.data() + (n_chunks - 1), size);
+    } else {
+      m_returnvalue = 0;
+    }
+  }
+
+  ParallelScanWithTotal(const FunctorType& arg_functor,
+                        const typename base_t::Policy& arg_policy,
+                        ReturnType& arg_returnvalue)
+      : base_t(arg_functor, arg_policy), m_returnvalue(arg_returnvalue) {}
+};
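+
+// A usage sketch for the variant above (illustrative names only):
+//
+//   double total = 0;
+//   Kokkos::parallel_scan(
+//       Kokkos::RangePolicy<Kokkos::Experimental::OpenMPTarget>(0, n),
+//       KOKKOS_LAMBDA(const int i, double& update, const bool is_final) {
+//         update += v[i];
+//         if (is_final) out[i] = update;
+//       },
+//       total);
+//
+// After the scan, `total` holds the sum over all elements, copied back
+// from the last entry of chunk_values.
+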
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                  Kokkos::Experimental::OpenMPTarget> {
+ private:
+  using Policy =
+      Kokkos::Impl::TeamPolicyInternal<Kokkos::Experimental::OpenMPTarget,
+                                       Properties...>;
+  using WorkTag = typename Policy::work_tag;
+  using Member  = typename Policy::member_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const size_t m_shmem_size;
+
+ public:
+  void execute() const {
+    OpenMPTargetExec::verify_is_process(
+        "Kokkos::Experimental::OpenMPTarget parallel_for");
+    OpenMPTargetExec::verify_initialized(
+        "Kokkos::Experimental::OpenMPTarget parallel_for");
+    execute_impl<WorkTag>();
+  }
+
+ private:
+  template <class TagType>
+  void execute_impl() const {
+    OpenMPTargetExec::verify_is_process(
+        "Kokkos::Experimental::OpenMPTarget parallel_for");
+    OpenMPTargetExec::verify_initialized(
+        "Kokkos::Experimental::OpenMPTarget parallel_for");
+    const auto league_size   = m_policy.league_size();
+    const auto team_size     = m_policy.team_size();
+    const auto vector_length = m_policy.impl_vector_length();
+
+    const size_t shmem_size_L0 = m_policy.scratch_size(0, team_size);
+    const size_t shmem_size_L1 = m_policy.scratch_size(1, team_size);
+    OpenMPTargetExec::resize_scratch(team_size, shmem_size_L0, shmem_size_L1,
+                                     league_size);
+
+    void* scratch_ptr = OpenMPTargetExec::get_scratch_ptr();
+    FunctorType a_functor(m_functor);
+
+    // FIXME_OPENMPTARGET - If the team_size is not a multiple of 32, the
+    // scratch implementation does not work in the Release or RelWithDebugInfo
+    // mode but works in the Debug mode.
+
+    // Maximum active teams possible.
+    int max_active_teams = OpenMPTargetExec::MAX_ACTIVE_THREADS / team_size;
+    // nteams should not exceed the maximum in-flight teams possible.
+    const auto nteams =
+        league_size < max_active_teams ? league_size : max_active_teams;
+
+    // If the league size is <=0, do not launch the kernel.
+    if (nteams <= 0) return;
+
+// We perform our own scheduling of teams to avoid separating the code between
+// teams-distribute and parallel; this gave a 2x performance boost in test
+// cases with the clang compiler. atomic_compare_exchange can be avoided since
+// the standard guarantees that the number of teams launched is always less
+// than or equal to the value specified in the `num_teams` clause.
+#pragma omp target teams num_teams(nteams) thread_limit(team_size) \
+    map(to                                                         \
+        : a_functor) is_device_ptr(scratch_ptr)
+#pragma omp parallel
+    {
+      const int blockIdx = omp_get_team_num();
+      const int gridDim  = omp_get_num_teams();
+
+      // Iterate over the league in strides of the number of running teams
+      // and assign the league_id accordingly.
+      // Guarantee that the compilers respect the `num_teams` clause
+      if (gridDim <= nteams) {
+        for (int league_id = blockIdx; league_id < league_size;
+             league_id += gridDim) {
+          typename Policy::member_type team(
+              league_id, league_size, team_size, vector_length, scratch_ptr,
+              blockIdx, shmem_size_L0, shmem_size_L1);
+          if constexpr (std::is_void<TagType>::value)
+            m_functor(team);
+          else
+            m_functor(TagType(), team);
+        }
+      } else
+        Kokkos::abort("`num_teams` clause was not respected.\n");
+    }
+  }
+
+ public:
+  ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+                     FunctorTeamShmemSize<FunctorType>::value(
+                         arg_functor, arg_policy.team_size())) {}
+};
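+
+// A usage sketch for this specialization (illustrative names only):
+//
+//   using team_policy =
+//       Kokkos::TeamPolicy<Kokkos::Experimental::OpenMPTarget>;
+//   Kokkos::parallel_for(
+//       team_policy(league_size, team_size),
+//       KOKKOS_LAMBDA(const team_policy::member_type& team) {
+//         // team.league_rank(), team.team_rank(), ...
+//       });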
+
+template <class FunctorType, class ReducerType, class PointerType,
+          class ValueType, class... PolicyArgs>
+struct ParallelReduceSpecialize<FunctorType, TeamPolicyInternal<PolicyArgs...>,
+                                ReducerType, PointerType, ValueType> {
+  using PolicyType = TeamPolicyInternal<PolicyArgs...>;
+  using TagType    = typename PolicyType::work_tag;
+  using ReducerTypeFwd =
+      std::conditional_t<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                                         PolicyType, ReducerTypeFwd>;
+
+  using ReferenceType = typename Analysis::reference_type;
+
+  using ParReduceCommon = ParallelReduceCommon<PointerType>;
+
+  static void execute_reducer(const FunctorType& f, const PolicyType& p,
+                              PointerType result_ptr, bool ptr_on_device) {
+    OpenMPTargetExec::verify_is_process(
+        "Kokkos::Experimental::OpenMPTarget parallel_reduce");
+    OpenMPTargetExec::verify_initialized(
+        "Kokkos::Experimental::OpenMPTarget parallel_reduce");
+
+    const int league_size   = p.league_size();
+    const int team_size     = p.team_size();
+    const int vector_length = p.impl_vector_length();
+
+    const size_t shmem_size_L0 = p.scratch_size(0, team_size);
+    const size_t shmem_size_L1 = p.scratch_size(1, team_size);
+    OpenMPTargetExec::resize_scratch(PolicyType::member_type::TEAM_REDUCE_SIZE,
+                                     shmem_size_L0, shmem_size_L1, league_size);
+    void* scratch_ptr = OpenMPTargetExec::get_scratch_ptr();
+
+    ValueType result = ValueType();
+
+    // Maximum active teams possible.
+    int max_active_teams = OpenMPTargetExec::MAX_ACTIVE_THREADS / team_size;
+    const auto nteams =
+        league_size < max_active_teams ? league_size : max_active_teams;
+
+    // If the league size is <=0, do not launch the kernel.
+    if (nteams <= 0) return;
+
+#pragma omp declare reduction(                                         \
+    custom:ValueType                                                   \
+    : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
+    initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
+
+#pragma omp target teams num_teams(nteams) thread_limit(team_size) map(to   \
+                                                                       : f) \
+    is_device_ptr(scratch_ptr) reduction(custom                             \
+                                         : result)
+#pragma omp parallel reduction(custom : result)
+    {
+      const int blockIdx = omp_get_team_num();
+      const int gridDim  = omp_get_num_teams();
+
+      // Guarantee that the compilers respect the `num_teams` clause
+      if (gridDim <= nteams) {
+        for (int league_id = blockIdx; league_id < league_size;
+             league_id += gridDim) {
+          typename PolicyType::member_type team(
+              league_id, league_size, team_size, vector_length, scratch_ptr,
+              blockIdx, shmem_size_L0, shmem_size_L1);
+          if constexpr (std::is_void<TagType>::value)
+            f(team, result);
+          else
+            f(TagType(), team, result);
+        }
+      } else
+        Kokkos::abort("`num_teams` clause was not respected.\n");
+    }
+
+    // Copy results back to device if `parallel_reduce` is on a device view.
+    ParReduceCommon::memcpy_result(result_ptr, &result, sizeof(ValueType),
+                                   ptr_on_device);
+  }
+
+  template <int NumReductions>
+  static void execute_array(const FunctorType& f, const PolicyType& p,
+                            PointerType result_ptr, bool ptr_on_device) {
+    OpenMPTargetExec::verify_is_process(
+        "Kokkos::Experimental::OpenMPTarget parallel_reduce");
+    OpenMPTargetExec::verify_initialized(
+        "Kokkos::Experimental::OpenMPTarget parallel_reduce");
+
+    const int league_size   = p.league_size();
+    const int team_size     = p.team_size();
+    const int vector_length = p.impl_vector_length();
+
+    const size_t shmem_size_L0 = p.scratch_size(0, team_size);
+    const size_t shmem_size_L1 = p.scratch_size(1, team_size);
+    OpenMPTargetExec::resize_scratch(PolicyType::member_type::TEAM_REDUCE_SIZE,
+                                     shmem_size_L0, shmem_size_L1, league_size);
+    void* scratch_ptr = OpenMPTargetExec::get_scratch_ptr();
+
+    // Maximum active teams possible.
+    int max_active_teams = OpenMPTargetExec::MAX_ACTIVE_THREADS / team_size;
+    const auto nteams =
+        league_size < max_active_teams ? league_size : max_active_teams;
+
+    // If the league size is <=0, do not launch the kernel.
+    if (nteams <= 0) return;
+
+    // Case where the number of reduction items is 1.
+    if constexpr (NumReductions == 1) {
+      ValueType result = ValueType();
+
+      // Case where reduction is on a native data type.
+      if constexpr (std::is_arithmetic<ValueType>::value) {
+#pragma omp target teams num_teams(nteams) thread_limit(team_size) map(to   \
+                                                                       : f) \
+    is_device_ptr(scratch_ptr) reduction(+: result)
+#pragma omp parallel reduction(+ : result)
+        {
+          const int blockIdx = omp_get_team_num();
+          const int gridDim  = omp_get_num_teams();
+
+          // Guarantee that the compilers respect the `num_teams` clause
+          if (gridDim <= nteams) {
+            for (int league_id = blockIdx; league_id < league_size;
+                 league_id += gridDim) {
+              typename PolicyType::member_type team(
+                  league_id, league_size, team_size, vector_length, scratch_ptr,
+                  blockIdx, shmem_size_L0, shmem_size_L1);
+              if constexpr (std::is_void<TagType>::value)
+                f(team, result);
+              else
+                f(TagType(), team, result);
+            }
+          } else
+            Kokkos::abort("`num_teams` clause was not respected.\n");
+        }
+      } else {
+        // Case where the reduction is on a non-native data type.
+#pragma omp declare reduction(custom:ValueType : omp_out += omp_in)
+#pragma omp target teams num_teams(nteams) thread_limit(team_size) map(to   \
+                                                                       : f) \
+    is_device_ptr(scratch_ptr) reduction(custom                             \
+                                         : result)
+#pragma omp parallel reduction(custom : result)
+        {
+          const int blockIdx = omp_get_team_num();
+          const int gridDim  = omp_get_num_teams();
+
+          // Guarantee that the compilers respect the `num_teams` clause
+          if (gridDim <= nteams) {
+            for (int league_id = blockIdx; league_id < league_size;
+                 league_id += gridDim) {
+              typename PolicyType::member_type team(
+                  league_id, league_size, team_size, vector_length, scratch_ptr,
+                  blockIdx, shmem_size_L0, shmem_size_L1);
+              if constexpr (std::is_void<TagType>::value)
+                f(team, result);
+              else
+                f(TagType(), team, result);
+            }
+          } else
+            Kokkos::abort("`num_teams` clause was not respected.\n");
+        }
+      }
+
+      // Copy results back to device if `parallel_reduce` is on a device view.
+      ParReduceCommon::memcpy_result(result_ptr, &result, sizeof(ValueType),
+                                     ptr_on_device);
+    } else {
+      ValueType result[NumReductions] = {};
+      // Case where the reduction is on an array.
+#pragma omp target teams num_teams(nteams) thread_limit(team_size) map(to   \
+                                                                       : f) \
+    is_device_ptr(scratch_ptr) reduction(+ : result[:NumReductions])
+#pragma omp parallel reduction(+ : result[:NumReductions])
+      {
+        const int blockIdx = omp_get_team_num();
+        const int gridDim  = omp_get_num_teams();
+
+        // Guarantee that the compilers respect the `num_teams` clause
+        if (gridDim <= nteams) {
+          for (int league_id = blockIdx; league_id < league_size;
+               league_id += gridDim) {
+            typename PolicyType::member_type team(
+                league_id, league_size, team_size, vector_length, scratch_ptr,
+                blockIdx, shmem_size_L0, shmem_size_L1);
+            if constexpr (std::is_void<TagType>::value)
+              f(team, result);
+            else
+              f(TagType(), team, result);
+          }
+        } else
+          Kokkos::abort("`num_teams` clause was not respected.\n");
+      }
+
+      // Copy results back to device if `parallel_reduce` is on a device view.
+      ParReduceCommon::memcpy_result(
+          result_ptr, result, NumReductions * sizeof(ValueType), ptr_on_device);
+    }
+  }
+
+  // FIXME_OPENMPTARGET : This routine is a copy of the `parallel_reduce`
+  // over RangePolicy and needs a new implementation.
+  static void execute_init_join(const FunctorType& f, const PolicyType& p,
+                                PointerType ptr, const bool ptr_on_device) {
+    using FunctorAnalysis =
+        Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, PolicyType,
+                              FunctorType>;
+    constexpr int HasInit = FunctorAnalysis::has_init_member_function;
+
+    const int league_size   = p.league_size();
+    const int team_size     = p.team_size();
+    const int vector_length = p.impl_vector_length();
+
+    auto begin = 0;
+    auto end   = league_size * team_size + team_size * vector_length;
+
+    const size_t shmem_size_L0 = p.scratch_size(0, team_size);
+    const size_t shmem_size_L1 = p.scratch_size(1, team_size);
+
+    // FIXME_OPENMPTARGET: This would oversubscribe scratch memory since we are
+    // already using the available scratch memory to create temporaries for each
+    // thread.
+    if ((shmem_size_L0 + shmem_size_L1) > 0) {
+      Kokkos::abort(
+          "OpenMPTarget: Scratch memory is not supported in `parallel_reduce` "
+          "over functors with init/join.");
+    }
+
+    const auto nteams = league_size;
+
+    // Number of elements in the reduction
+    const auto value_count = FunctorAnalysis::value_count(f);
+
+    // Allocate scratch per active thread.
+    OpenMPTargetExec::resize_scratch(1, 0, value_count * sizeof(ValueType),
+                                     league_size);
+    void* scratch_ptr = OpenMPTargetExec::get_scratch_ptr();
+
+    // Taken if the functor provides an `init` member function.
+    if constexpr (HasInit) {
+      // The `init` routine needs to be called on the device since it might need
+      // device members.
+#pragma omp target map(to : f) is_device_ptr(scratch_ptr)
+      {
+        typename FunctorAnalysis::Reducer final_reducer(&f);
+        final_reducer.init(scratch_ptr);
+        final_reducer.final(scratch_ptr);
+      }
+    } else {
+#pragma omp target map(to : f) is_device_ptr(scratch_ptr)
+      {
+        for (int i = 0; i < value_count; ++i) {
+          static_cast<ValueType*>(scratch_ptr)[i] = ValueType();
+        }
+
+        typename FunctorAnalysis::Reducer final_reducer(&f);
+        final_reducer.final(static_cast<ValueType*>(scratch_ptr));
+      }
+    }
+
+    if (end <= begin) {
+      // If there is no work to be done, copy back the initialized values and
+      // exit.
+      if (!ptr_on_device)
+        OMPT_SAFE_CALL(omp_target_memcpy(
+            ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+            omp_get_initial_device(), omp_get_default_device()));
+      else
+        OMPT_SAFE_CALL(omp_target_memcpy(
+            ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+            omp_get_default_device(), omp_get_default_device()));
+
+      return;
+    }
+
+#pragma omp target teams num_teams(nteams) thread_limit(team_size) map(to   \
+                                                                       : f) \
+    is_device_ptr(scratch_ptr)
+    {
+#pragma omp parallel
+      {
+        const int team_num      = omp_get_team_num();
+        const int num_teams     = omp_get_num_teams();
+        ValueType* team_scratch = static_cast<ValueType*>(scratch_ptr) +
+                                  team_num * team_size * value_count;
+        typename FunctorAnalysis::Reducer final_reducer(&f);
+        ReferenceType result = final_reducer.init(&team_scratch[0]);
+
+        for (int league_id = team_num; league_id < league_size;
+             league_id += num_teams) {
+          typename PolicyType::member_type team(
+              league_id, league_size, team_size, vector_length, scratch_ptr,
+              team_num, shmem_size_L0, shmem_size_L1);
+          if constexpr (std::is_void<TagType>::value) {
+            f(team, result);
+          } else {
+            f(TagType(), team, result);
+          }
+        }
+      }  // end parallel
+    }    // end target
+
+    int tree_neighbor_offset = 1;
+    do {
+#pragma omp target teams distribute parallel for simd map(to   \
+                                                          : f) \
+    is_device_ptr(scratch_ptr)
+      for (int i = 0; i < nteams - tree_neighbor_offset;
+           i += 2 * tree_neighbor_offset) {
+        ValueType* team_scratch = static_cast<ValueType*>(scratch_ptr);
+        const int team_offset   = team_size * value_count;
+        typename FunctorAnalysis::Reducer final_reducer(&f);
+        final_reducer.join(
+            &team_scratch[i * team_offset],
+            &team_scratch[(i + tree_neighbor_offset) * team_offset]);
+
+        // If `final` is provided by the functor, apply it only once, at
+        // the very end.
+        if (tree_neighbor_offset * 2 >= nteams && omp_get_team_num() == 0 &&
+            omp_get_thread_num() == 0) {
+          final_reducer.final(scratch_ptr);
+        }
+      }
+      tree_neighbor_offset *= 2;
+    } while (tree_neighbor_offset < nteams);
+
+    // If the result view is on the host, copy back the values via memcpy.
+    if (!ptr_on_device)
+      OMPT_SAFE_CALL(omp_target_memcpy(
+          ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+          omp_get_initial_device(), omp_get_default_device()));
+    else
+      OMPT_SAFE_CALL(omp_target_memcpy(
+          ptr, scratch_ptr, value_count * sizeof(ValueType), 0, 0,
+          omp_get_default_device(), omp_get_default_device()));
+  }
+};
+
+template <class FunctorType, class ReducerType, class... Properties>
+class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                     ReducerType, Kokkos::Experimental::OpenMPTarget> {
+ private:
+  using Policy =
+      Kokkos::Impl::TeamPolicyInternal<Kokkos::Experimental::OpenMPTarget,
+                                       Properties...>;
+
+  using WorkTag = typename Policy::work_tag;
+  using Member  = typename Policy::member_type;
+  using ReducerTypeFwd =
+      std::conditional_t<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using WorkTagFwd =
+      std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
+                         void>;
+  using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                                         Policy, ReducerTypeFwd>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+  using value_type     = typename Analysis::value_type;
+
+  bool m_result_ptr_on_device;
+  const int m_result_ptr_num_elems;
+
+  static constexpr int HasJoin =
+      Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, Policy,
+                            FunctorType>::has_join_member_function;
+  static constexpr int UseReducer = is_reducer<ReducerType>::value;
+  static constexpr int IsArray    = std::is_pointer<reference_type>::value;
+
+  using ParReduceSpecialize =
+      ParallelReduceSpecialize<FunctorType, Policy, ReducerType, pointer_type,
+                               typename Analysis::value_type>;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  const size_t m_shmem_size;
+
+ public:
+  void execute() const {
+    if constexpr (HasJoin) {
+      ParReduceSpecialize::execute_init_join(m_functor, m_policy, m_result_ptr,
+                                             m_result_ptr_on_device);
+    } else if constexpr (UseReducer) {
+      ParReduceSpecialize::execute_reducer(m_functor, m_policy, m_result_ptr,
+                                           m_result_ptr_on_device);
+    } else if constexpr (IsArray) {
+      if (m_result_ptr_num_elems <= 2) {
+        ParReduceSpecialize::template execute_array<2>(
+            m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+      } else if (m_result_ptr_num_elems <= 4) {
+        ParReduceSpecialize::template execute_array<4>(
+            m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+      } else if (m_result_ptr_num_elems <= 8) {
+        ParReduceSpecialize::template execute_array<8>(
+            m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+      } else if (m_result_ptr_num_elems <= 16) {
+        ParReduceSpecialize::template execute_array<16>(
+            m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+      } else if (m_result_ptr_num_elems <= 32) {
+        ParReduceSpecialize::template execute_array<32>(
+            m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+      } else {
+        Kokkos::abort("array reduction length must be <= 32");
+      }
+    } else {
+      ParReduceSpecialize::template execute_array<1>(
+          m_functor, m_policy, m_result_ptr, m_result_ptr_on_device);
+    }
+  }
+
+  template <class ViewType>
+  ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
+                 const ViewType& arg_result,
+                 std::enable_if_t<Kokkos::is_view<ViewType>::value &&
+                                      !Kokkos::is_reducer<ReducerType>::value,
+                                  void*> = nullptr)
+      : m_result_ptr_on_device(
+            MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_result_ptr_num_elems(arg_result.size()),
+        m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result.data()),
+        m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+                     FunctorTeamShmemSize<FunctorType>::value(
+                         arg_functor, arg_policy.team_size())) {}
+
+  ParallelReduce(const FunctorType& arg_functor, Policy& arg_policy,
+                 const ReducerType& reducer)
+      : m_result_ptr_on_device(
+            MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_result_ptr_num_elems(reducer.view().size()),
+        m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_shmem_size(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+                     FunctorTeamShmemSize<FunctorType>::value(
+                         arg_functor, arg_policy.team_size())) {}
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename iType>
+struct TeamThreadRangeBoundariesStruct<iType, OpenMPTargetExecTeamMember> {
+  using index_type = iType;
+  const iType start;
+  const iType end;
+  const OpenMPTargetExecTeamMember& team;
+
+  TeamThreadRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
+                                  iType count)
+      : start(0), end(count), team(thread_) {}
+  TeamThreadRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
+                                  iType begin_, iType end_)
+      : start(begin_), end(end_), team(thread_) {}
+};
+
+template <typename iType>
+struct ThreadVectorRangeBoundariesStruct<iType, OpenMPTargetExecTeamMember> {
+  using index_type = iType;
+  const index_type start;
+  const index_type end;
+  const OpenMPTargetExecTeamMember& team;
+
+  ThreadVectorRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
+                                    index_type count)
+      : start(0), end(count), team(thread_) {}
+  ThreadVectorRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
+                                    index_type begin_, index_type end_)
+      : start(begin_), end(end_), team(thread_) {}
+};
+
+template <typename iType>
+struct TeamVectorRangeBoundariesStruct<iType, OpenMPTargetExecTeamMember> {
+  using index_type = iType;
+  const index_type start;
+  const index_type end;
+  const OpenMPTargetExecTeamMember& team;
+
+  TeamVectorRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
+                                  index_type count)
+      : start(0), end(count), team(thread_) {}
+  TeamVectorRangeBoundariesStruct(const OpenMPTargetExecTeamMember& thread_,
+                                  index_type begin_, index_type end_)
+      : start(begin_), end(end_), team(thread_) {}
+};
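+
+// These boundary structs back the nested execution patterns over a team; a
+// sketch (illustrative names only, inside a team-level functor):
+//
+//   Kokkos::parallel_for(
+//       Kokkos::TeamThreadRange(team, m), [&](const int i) {
+//         Kokkos::parallel_for(Kokkos::ThreadVectorRange(team, k),
+//                              [&](const int j) { A(i, j) = 0; });
+//       });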
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* KOKKOS_OPENMPTARGET_PARALLEL_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Parallel_MDRange.hpp b/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Parallel_MDRange.hpp
new file mode 100644 (file)
index 0000000..2399b42
--- /dev/null
@@ -0,0 +1,836 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMPTARGET_PARALLEL_MDRANGE_HPP
+#define KOKKOS_OPENMPTARGET_PARALLEL_MDRANGE_HPP
+
+#include <omp.h>
+#include <Kokkos_Parallel.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_Exec.hpp>
+
+// WORKAROUND OPENMPTARGET: sometimes tile sizes are not transferred to the
+// device correctly; this was tracked down to a clang bug in mapping structs
+// that contain arrays of long. Arrays of int might be fine, though ...
+#define KOKKOS_IMPL_MDRANGE_USE_NO_TILES  // undef EOF
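+//
+// A minimal illustrative sketch (not part of the original sources) of the
+// kind of mapping the workaround avoids; the struct `Tiles` is hypothetical
+// and stands in for the policy's internal tile arrays:
+//
+//   struct Tiles { long extent[6]; };   // arrays of long were affected
+//   Tiles t;
+//   #pragma omp target map(to : t)      // members of t may arrive corrupted
+//   { use(t.extent[0]); }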
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+                  Kokkos::Experimental::OpenMPTarget> {
+ private:
+  using Policy  = Kokkos::MDRangePolicy<Traits...>;
+  using WorkTag = typename Policy::work_tag;
+  using Member  = typename Policy::member_type;
+  using Index   = typename Policy::index_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+ public:
+  inline void execute() const {
+    OpenMPTargetExec::verify_is_process(
+        "Kokkos::Experimental::OpenMPTarget parallel_for");
+    OpenMPTargetExec::verify_initialized(
+        "Kokkos::Experimental::OpenMPTarget parallel_for");
+    FunctorType functor(m_functor);
+    Policy policy = m_policy;
+
+#ifdef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
+    typename Policy::point_type unused;
+
+    execute_tile<Policy::rank>(unused, functor, policy);
+#else
+    const int64_t begin = 0;
+    const int64_t end   = m_policy.m_num_tiles;
+
+#pragma omp target teams distribute map(to : functor) num_teams(end - begin)
+    {
+      for (ptrdiff_t tile_idx = begin; tile_idx < end; ++tile_idx) {
+
+#pragma omp parallel
+        {
+          typename Policy::point_type offset;
+          // Compute the tile offset on a local copy: an OpenMP loop
+          // iteration variable must not be modified inside the loop body.
+          ptrdiff_t idx = tile_idx;
+          if (Policy::outer_direction == Policy::Left) {
+            for (int i = 0; i < Policy::rank; ++i) {
+              offset[i] = (idx % policy.m_tile_end[i]) * policy.m_tile[i] +
+                          policy.m_lower[i];
+              idx /= policy.m_tile_end[i];
+            }
+          } else {
+            for (int i = Policy::rank - 1; i >= 0; --i) {
+              offset[i] = (idx % policy.m_tile_end[i]) * policy.m_tile[i] +
+                          policy.m_lower[i];
+              idx /= policy.m_tile_end[i];
+            }
+          }
+          execute_tile<Policy::rank>(offset, functor, policy);
+        }
+      }
+    }
+#endif
+  }
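+
+  // Usage sketch (hypothetical user code, illustrative only; `a`, `n`, and
+  // `m` are assumptions): a rank-2 MDRangePolicy dispatches through
+  // execute() to execute_tile<2>() below.
+  //
+  //   Kokkos::parallel_for(
+  //       Kokkos::MDRangePolicy<Kokkos::Experimental::OpenMPTarget,
+  //                             Kokkos::Rank<2>>({0, 0}, {n, m}),
+  //       KOKKOS_LAMBDA(const int i, const int j) { a(i, j) = i + j; });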
+
+  template <int Rank>
+  inline std::enable_if_t<Rank == 2> execute_tile(
+      typename Policy::point_type offset, const FunctorType& functor,
+      const Policy& policy) const {
+#ifdef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
+    (void)offset;
+    const Index begin_0 = policy.m_lower[0];
+    const Index begin_1 = policy.m_lower[1];
+
+    const Index end_0 = policy.m_upper[0];
+    const Index end_1 = policy.m_upper[1];
+
+#pragma omp target teams distribute parallel for collapse(2) map(to : functor)
+    for (auto i0 = begin_0; i0 < end_0; ++i0) {
+      for (auto i1 = begin_1; i1 < end_1; ++i1) {
+        if constexpr (std::is_void<typename Policy::work_tag>::value)
+          functor(i0, i1);
+        else
+          functor(typename Policy::work_tag(), i0, i1);
+      }
+    }
+#else
+    const ptrdiff_t begin_0 = offset[0];
+    ptrdiff_t end_0         = begin_0 + policy.m_tile[0];
+    end_0 = end_0 < policy.m_upper[0] ? end_0 : policy.m_upper[0];
+
+    const ptrdiff_t begin_1 = offset[1];
+    ptrdiff_t end_1         = begin_1 + policy.m_tile[1];
+    end_1 = end_1 < policy.m_upper[1] ? end_1 : policy.m_upper[1];
+
+#pragma omp for collapse(2)
+    for (ptrdiff_t i0 = begin_0; i0 < end_0; ++i0)
+      for (ptrdiff_t i1 = begin_1; i1 < end_1; ++i1) {
+        if constexpr (std::is_void<typename Policy::work_tag>::value)
+          functor(i0, i1);
+        else
+          functor(typename Policy::work_tag(), i0, i1);
+      }
+#endif
+  }
+
+  template <int Rank>
+  inline std::enable_if_t<Rank == 3> execute_tile(
+      typename Policy::point_type offset, const FunctorType& functor,
+      const Policy& policy) const {
+#ifdef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
+    (void)offset;
+    const Index begin_0 = policy.m_lower[0];
+    const Index begin_1 = policy.m_lower[1];
+    const Index begin_2 = policy.m_lower[2];
+
+    const Index end_0 = policy.m_upper[0];
+    const Index end_1 = policy.m_upper[1];
+    const Index end_2 = policy.m_upper[2];
+
+#pragma omp target teams distribute parallel for collapse(3) map(to : functor)
+    for (auto i0 = begin_0; i0 < end_0; ++i0) {
+      for (auto i1 = begin_1; i1 < end_1; ++i1) {
+        for (auto i2 = begin_2; i2 < end_2; ++i2) {
+          if constexpr (std::is_void<typename Policy::work_tag>::value)
+            functor(i0, i1, i2);
+          else
+            functor(typename Policy::work_tag(), i0, i1, i2);
+        }
+      }
+    }
+#else
+    const ptrdiff_t begin_0 = offset[0];
+    ptrdiff_t end_0         = begin_0 + policy.m_tile[0];
+    end_0 = end_0 < policy.m_upper[0] ? end_0 : policy.m_upper[0];
+
+    const ptrdiff_t begin_1 = offset[1];
+    ptrdiff_t end_1         = begin_1 + policy.m_tile[1];
+    end_1 = end_1 < policy.m_upper[1] ? end_1 : policy.m_upper[1];
+
+    const ptrdiff_t begin_2 = offset[2];
+    ptrdiff_t end_2         = begin_2 + policy.m_tile[2];
+    end_2 = end_2 < policy.m_upper[2] ? end_2 : policy.m_upper[2];
+
+#pragma omp for collapse(3)
+    for (ptrdiff_t i0 = begin_0; i0 < end_0; ++i0)
+      for (ptrdiff_t i1 = begin_1; i1 < end_1; ++i1)
+        for (ptrdiff_t i2 = begin_2; i2 < end_2; ++i2) {
+          if constexpr (std::is_void<typename Policy::work_tag>::value)
+            functor(i0, i1, i2);
+          else
+            functor(typename Policy::work_tag(), i0, i1, i2);
+        }
+#endif
+  }
+
+  template <int Rank>
+  inline std::enable_if_t<Rank == 4> execute_tile(
+      typename Policy::point_type offset, const FunctorType& functor,
+      const Policy& policy) const {
+#ifdef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
+    (void)offset;
+    const Index begin_0 = policy.m_lower[0];
+    const Index begin_1 = policy.m_lower[1];
+    const Index begin_2 = policy.m_lower[2];
+    const Index begin_3 = policy.m_lower[3];
+
+    const Index end_0 = policy.m_upper[0];
+    const Index end_1 = policy.m_upper[1];
+    const Index end_2 = policy.m_upper[2];
+    const Index end_3 = policy.m_upper[3];
+
+#pragma omp target teams distribute parallel for collapse(4) map(to : functor)
+    for (auto i0 = begin_0; i0 < end_0; ++i0) {
+      for (auto i1 = begin_1; i1 < end_1; ++i1) {
+        for (auto i2 = begin_2; i2 < end_2; ++i2) {
+          for (auto i3 = begin_3; i3 < end_3; ++i3) {
+            if constexpr (std::is_void<typename Policy::work_tag>::value)
+              functor(i0, i1, i2, i3);
+            else
+              functor(typename Policy::work_tag(), i0, i1, i2, i3);
+          }
+        }
+      }
+    }
+#else
+    const ptrdiff_t begin_0 = offset[0];
+    ptrdiff_t end_0         = begin_0 + policy.m_tile[0];
+    end_0 = end_0 < policy.m_upper[0] ? end_0 : policy.m_upper[0];
+
+    const ptrdiff_t begin_1 = offset[1];
+    ptrdiff_t end_1         = begin_1 + policy.m_tile[1];
+    end_1 = end_1 < policy.m_upper[1] ? end_1 : policy.m_upper[1];
+
+    const ptrdiff_t begin_2 = offset[2];
+    ptrdiff_t end_2         = begin_2 + policy.m_tile[2];
+    end_2 = end_2 < policy.m_upper[2] ? end_2 : policy.m_upper[2];
+
+    const ptrdiff_t begin_3 = offset[3];
+    ptrdiff_t end_3         = begin_3 + policy.m_tile[3];
+    end_3 = end_3 < policy.m_upper[3] ? end_3 : policy.m_upper[3];
+
+#pragma omp for collapse(4)
+    for (ptrdiff_t i0 = begin_0; i0 < end_0; ++i0)
+      for (ptrdiff_t i1 = begin_1; i1 < end_1; ++i1)
+        for (ptrdiff_t i2 = begin_2; i2 < end_2; ++i2)
+          for (ptrdiff_t i3 = begin_3; i3 < end_3; ++i3) {
+            if constexpr (std::is_void<typename Policy::work_tag>::value)
+              functor(i0, i1, i2, i3);
+            else
+              functor(typename Policy::work_tag(), i0, i1, i2, i3);
+          }
+#endif
+  }
+
+  template <int Rank>
+  inline std::enable_if_t<Rank == 5> execute_tile(
+      typename Policy::point_type offset, const FunctorType& functor,
+      const Policy& policy) const {
+#ifdef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
+    (void)offset;
+    const Index begin_0 = policy.m_lower[0];
+    const Index begin_1 = policy.m_lower[1];
+    const Index begin_2 = policy.m_lower[2];
+    const Index begin_3 = policy.m_lower[3];
+    const Index begin_4 = policy.m_lower[4];
+
+    const Index end_0 = policy.m_upper[0];
+    const Index end_1 = policy.m_upper[1];
+    const Index end_2 = policy.m_upper[2];
+    const Index end_3 = policy.m_upper[3];
+    const Index end_4 = policy.m_upper[4];
+
+#pragma omp target teams distribute parallel for collapse(5) map(to : functor)
+    for (auto i0 = begin_0; i0 < end_0; ++i0) {
+      for (auto i1 = begin_1; i1 < end_1; ++i1) {
+        for (auto i2 = begin_2; i2 < end_2; ++i2) {
+          for (auto i3 = begin_3; i3 < end_3; ++i3) {
+            for (auto i4 = begin_4; i4 < end_4; ++i4) {
+              if constexpr (std::is_same<typename Policy::work_tag,
+                                         void>::value)
+                functor(i0, i1, i2, i3, i4);
+              else
+                functor(typename Policy::work_tag(), i0, i1, i2, i3, i4);
+            }
+          }
+        }
+      }
+    }
+#else
+    const ptrdiff_t begin_0 = offset[0];
+    ptrdiff_t end_0         = begin_0 + policy.m_tile[0];
+    end_0 = end_0 < policy.m_upper[0] ? end_0 : policy.m_upper[0];
+
+    const ptrdiff_t begin_1 = offset[1];
+    ptrdiff_t end_1         = begin_1 + policy.m_tile[1];
+    end_1 = end_1 < policy.m_upper[1] ? end_1 : policy.m_upper[1];
+
+    const ptrdiff_t begin_2 = offset[2];
+    ptrdiff_t end_2         = begin_2 + policy.m_tile[2];
+    end_2 = end_2 < policy.m_upper[2] ? end_2 : policy.m_upper[2];
+
+    const ptrdiff_t begin_3 = offset[3];
+    ptrdiff_t end_3         = begin_3 + policy.m_tile[3];
+    end_3 = end_3 < policy.m_upper[3] ? end_3 : policy.m_upper[3];
+
+    const ptrdiff_t begin_4 = offset[4];
+    ptrdiff_t end_4         = begin_4 + policy.m_tile[4];
+    end_4 = end_4 < policy.m_upper[4] ? end_4 : policy.m_upper[4];
+
+#pragma omp for collapse(5)
+    for (ptrdiff_t i0 = begin_0; i0 < end_0; ++i0)
+      for (ptrdiff_t i1 = begin_1; i1 < end_1; ++i1)
+        for (ptrdiff_t i2 = begin_2; i2 < end_2; ++i2)
+          for (ptrdiff_t i3 = begin_3; i3 < end_3; ++i3)
+            for (ptrdiff_t i4 = begin_4; i4 < end_4; ++i4) {
+              if constexpr (std::is_same<typename Policy::work_tag,
+                                         void>::value)
+                functor(i0, i1, i2, i3, i4);
+              else
+                functor(typename Policy::work_tag(), i0, i1, i2, i3, i4);
+            }
+#endif
+  }
+
+  template <int Rank>
+  inline std::enable_if_t<Rank == 6> execute_tile(
+      typename Policy::point_type offset, const FunctorType& functor,
+      const Policy& policy) const {
+#ifdef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
+    (void)offset;
+    const Index begin_0 = policy.m_lower[0];
+    const Index begin_1 = policy.m_lower[1];
+    const Index begin_2 = policy.m_lower[2];
+    const Index begin_3 = policy.m_lower[3];
+    const Index begin_4 = policy.m_lower[4];
+    const Index begin_5 = policy.m_lower[5];
+
+    const Index end_0 = policy.m_upper[0];
+    const Index end_1 = policy.m_upper[1];
+    const Index end_2 = policy.m_upper[2];
+    const Index end_3 = policy.m_upper[3];
+    const Index end_4 = policy.m_upper[4];
+    const Index end_5 = policy.m_upper[5];
+
+#pragma omp target teams distribute parallel for collapse(6) map(to : functor)
+    for (auto i0 = begin_0; i0 < end_0; ++i0) {
+      for (auto i1 = begin_1; i1 < end_1; ++i1) {
+        for (auto i2 = begin_2; i2 < end_2; ++i2) {
+          for (auto i3 = begin_3; i3 < end_3; ++i3) {
+            for (auto i4 = begin_4; i4 < end_4; ++i4) {
+              for (auto i5 = begin_5; i5 < end_5; ++i5) {
+                if constexpr (std::is_same<typename Policy::work_tag,
+                                           void>::value)
+                  functor(i0, i1, i2, i3, i4, i5);
+                else
+                  functor(typename Policy::work_tag(), i0, i1, i2, i3, i4,
+                          i5);
+              }
+            }
+          }
+        }
+      }
+    }
+#else
+    const ptrdiff_t begin_0 = offset[0];
+    ptrdiff_t end_0         = begin_0 + policy.m_tile[0];
+    end_0 = end_0 < policy.m_upper[0] ? end_0 : policy.m_upper[0];
+
+    const ptrdiff_t begin_1 = offset[1];
+    ptrdiff_t end_1         = begin_1 + policy.m_tile[1];
+    end_1 = end_1 < policy.m_upper[1] ? end_1 : policy.m_upper[1];
+
+    const ptrdiff_t begin_2 = offset[2];
+    ptrdiff_t end_2         = begin_2 + policy.m_tile[2];
+    end_2 = end_2 < policy.m_upper[2] ? end_2 : policy.m_upper[2];
+
+    const ptrdiff_t begin_3 = offset[3];
+    ptrdiff_t end_3         = begin_3 + policy.m_tile[3];
+    end_3 = end_3 < policy.m_upper[3] ? end_3 : policy.m_upper[3];
+
+    const ptrdiff_t begin_4 = offset[4];
+    ptrdiff_t end_4         = begin_4 + policy.m_tile[4];
+    end_4 = end_4 < policy.m_upper[4] ? end_4 : policy.m_upper[4];
+
+    const ptrdiff_t begin_5 = offset[5];
+    ptrdiff_t end_5         = begin_5 + policy.m_tile[5];
+    end_5 = end_5 < policy.m_upper[5] ? end_5 : policy.m_upper[5];
+
+#pragma omp for collapse(6)
+    for (ptrdiff_t i0 = begin_0; i0 < end_0; ++i0)
+      for (ptrdiff_t i1 = begin_1; i1 < end_1; ++i1)
+        for (ptrdiff_t i2 = begin_2; i2 < end_2; ++i2)
+          for (ptrdiff_t i3 = begin_3; i3 < end_3; ++i3)
+            for (ptrdiff_t i4 = begin_4; i4 < end_4; ++i4)
+              for (ptrdiff_t i5 = begin_5; i5 < end_5; ++i5) {
+                if constexpr (std::is_same<typename Policy::work_tag,
+                                           void>::value)
+                  functor(i0, i1, i2, i3, i4, i5);
+                else
+                  functor(typename Policy::work_tag(), i0, i1, i2, i3, i4, i5);
+              }
+#endif
+  }
+
+  inline ParallelFor(const FunctorType& arg_functor, Policy arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+  // TODO DZP: based on a conversation with Christian, we're using 256 as a
+  // heuristic here. We need something better once we can query these kinds
+  // of properties.
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy&, const Functor&) {
+    return 256;
+  }
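+  // (E.g. a rank-2 policy may thus use tiles of up to 16 x 16, since
+  //  16 * 16 == 256.)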
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
+                     Kokkos::Experimental::OpenMPTarget> {
+ private:
+  using Policy = Kokkos::MDRangePolicy<Traits...>;
+
+  using WorkTag = typename Policy::work_tag;
+  using Member  = typename Policy::member_type;
+  using Index   = typename Policy::index_type;
+
+  using ReducerConditional =
+      std::conditional<std::is_same<InvalidType, ReducerType>::value,
+                       FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                                         Policy, ReducerTypeFwd>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  enum {
+    HasJoin =
+        Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE, Policy,
+                              FunctorType>::has_join_member_function
+  };
+  enum { UseReducer = is_reducer<ReducerType>::value };
+
+  const pointer_type m_result_ptr;
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+
+  using ParReduceCommon = ParallelReduceCommon<pointer_type>;
+
+  bool m_result_ptr_on_device;
+
+ public:
+  inline void execute() const {
+    execute_tile<Policy::rank, typename Analysis::value_type>(
+        m_functor, m_policy, m_result_ptr);
+  }
+
+  template <class ViewType>
+  inline ParallelReduce(
+      const FunctorType& arg_functor, Policy arg_policy,
+      const ViewType& arg_result_view,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value &&
+                           !Kokkos::is_reducer<ReducerType>::value,
+                       void*> = NULL)
+      : m_result_ptr(arg_result_view.data()),
+        m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr_on_device(
+            MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
+                              typename ViewType::memory_space>::accessible) {}
+
+  inline ParallelReduce(const FunctorType& arg_functor, Policy arg_policy,
+                        const ReducerType& reducer)
+      : m_result_ptr(reducer.view().data()),
+        m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr_on_device(
+            MemorySpaceAccess<Kokkos::Experimental::OpenMPTargetSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible) {}
+
+  template <int Rank, class ValueType>
+  inline std::enable_if_t<Rank == 2> execute_tile(const FunctorType& functor,
+                                                  const Policy& policy,
+                                                  pointer_type ptr) const {
+    const Index begin_0 = policy.m_lower[0];
+    const Index begin_1 = policy.m_lower[1];
+
+    const Index end_0 = policy.m_upper[0];
+    const Index end_1 = policy.m_upper[1];
+
+    ValueType result = ValueType();
+
+    // FIXME_OPENMPTARGET: Unable to separate directives and their companion
+    // loops, which leads to code duplication for different reduction types.
+    if constexpr (UseReducer) {
+#pragma omp declare reduction(                                         \
+    custom:ValueType                                                   \
+    : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
+    initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(2) map(to         \
+                                                                 : functor) \
+    reduction(custom                                                        \
+              : result)
+      for (auto i0 = begin_0; i0 < end_0; ++i0) {
+        for (auto i1 = begin_1; i1 < end_1; ++i1) {
+          if constexpr (std::is_void<typename Policy::work_tag>::value)
+            functor(i0, i1, result);
+          else
+            functor(typename Policy::work_tag(), i0, i1, result);
+        }
+      }
+    } else {
+#pragma omp target teams distribute parallel for collapse(2) map(to : functor) \
+reduction(+:result)
+      for (auto i0 = begin_0; i0 < end_0; ++i0) {
+        for (auto i1 = begin_1; i1 < end_1; ++i1) {
+          if constexpr (std::is_void<typename Policy::work_tag>::value)
+            functor(i0, i1, result);
+          else
+            functor(typename Policy::work_tag(), i0, i1, result);
+        }
+      }
+    }
+
+    ParReduceCommon::memcpy_result(ptr, &result, sizeof(ValueType),
+                                   m_result_ptr_on_device);
+  }
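+
+  // Usage sketch (hypothetical, illustrative only; `a`, `n`, and `m` are
+  // assumptions): a built-in reducer such as Kokkos::Max selects the
+  // "custom" declare-reduction branch above, while a plain scalar sum takes
+  // the reduction(+ : result) branch.
+  //
+  //   double mx;
+  //   Kokkos::parallel_reduce(
+  //       Kokkos::MDRangePolicy<Kokkos::Experimental::OpenMPTarget,
+  //                             Kokkos::Rank<2>>({0, 0}, {n, m}),
+  //       KOKKOS_LAMBDA(const int i, const int j, double& v) {
+  //         v = a(i, j) > v ? a(i, j) : v;
+  //       },
+  //       Kokkos::Max<double>(mx));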
+
+  template <int Rank, class ValueType>
+  inline std::enable_if_t<Rank == 3> execute_tile(const FunctorType& functor,
+                                                  const Policy& policy,
+                                                  pointer_type ptr) const {
+    const Index begin_0 = policy.m_lower[0];
+    const Index begin_1 = policy.m_lower[1];
+    const Index begin_2 = policy.m_lower[2];
+
+    const Index end_0 = policy.m_upper[0];
+    const Index end_1 = policy.m_upper[1];
+    const Index end_2 = policy.m_upper[2];
+
+    ValueType result = ValueType();
+
+    // FIXME_OPENMPTARGET: Unable to separate directives and their companion
+    // loops, which leads to code duplication for different reduction types.
+    if constexpr (UseReducer) {
+#pragma omp declare reduction(                                         \
+    custom:ValueType                                                   \
+    : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
+    initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(3) map(to         \
+                                                                 : functor) \
+    reduction(custom                                                        \
+              : result)
+      for (auto i0 = begin_0; i0 < end_0; ++i0) {
+        for (auto i1 = begin_1; i1 < end_1; ++i1) {
+          for (auto i2 = begin_2; i2 < end_2; ++i2) {
+            if constexpr (std::is_void<typename Policy::work_tag>::value)
+              functor(i0, i1, i2, result);
+            else
+              functor(typename Policy::work_tag(), i0, i1, i2, result);
+          }
+        }
+      }
+    } else {
+#pragma omp target teams distribute parallel for collapse(3) map(to : functor) \
+reduction(+:result)
+      for (auto i0 = begin_0; i0 < end_0; ++i0) {
+        for (auto i1 = begin_1; i1 < end_1; ++i1) {
+          for (auto i2 = begin_2; i2 < end_2; ++i2) {
+            if constexpr (std::is_void<typename Policy::work_tag>::value)
+              functor(i0, i1, i2, result);
+            else
+              functor(typename Policy::work_tag(), i0, i1, i2, result);
+          }
+        }
+      }
+    }
+
+    ParReduceCommon::memcpy_result(ptr, &result, sizeof(ValueType),
+                                   m_result_ptr_on_device);
+  }
+
+  template <int Rank, class ValueType>
+  inline std::enable_if_t<Rank == 4> execute_tile(const FunctorType& functor,
+                                                  const Policy& policy,
+                                                  pointer_type ptr) const {
+    const Index begin_0 = policy.m_lower[0];
+    const Index begin_1 = policy.m_lower[1];
+    const Index begin_2 = policy.m_lower[2];
+    const Index begin_3 = policy.m_lower[3];
+
+    const Index end_0 = policy.m_upper[0];
+    const Index end_1 = policy.m_upper[1];
+    const Index end_2 = policy.m_upper[2];
+    const Index end_3 = policy.m_upper[3];
+
+    ValueType result = ValueType();
+
+    // FIXME_OPENMPTARGET: Unable to separate directives and their companion
+    // loops, which leads to code duplication for different reduction types.
+    if constexpr (UseReducer) {
+#pragma omp declare reduction(                                         \
+    custom:ValueType                                                   \
+    : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
+    initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(4) map(to         \
+                                                                 : functor) \
+    reduction(custom                                                        \
+              : result)
+      for (auto i0 = begin_0; i0 < end_0; ++i0) {
+        for (auto i1 = begin_1; i1 < end_1; ++i1) {
+          for (auto i2 = begin_2; i2 < end_2; ++i2) {
+            for (auto i3 = begin_3; i3 < end_3; ++i3) {
+              if constexpr (std::is_same<typename Policy::work_tag,
+                                         void>::value)
+                functor(i0, i1, i2, i3, result);
+              else
+                functor(typename Policy::work_tag(), i0, i1, i2, i3, result);
+            }
+          }
+        }
+      }
+    } else {
+#pragma omp target teams distribute parallel for collapse(4) map(to : functor) \
+reduction(+:result)
+      for (auto i0 = begin_0; i0 < end_0; ++i0) {
+        for (auto i1 = begin_1; i1 < end_1; ++i1) {
+          for (auto i2 = begin_2; i2 < end_2; ++i2) {
+            for (auto i3 = begin_3; i3 < end_3; ++i3) {
+              if constexpr (std::is_same<typename Policy::work_tag,
+                                         void>::value)
+                functor(i0, i1, i2, i3, result);
+              else
+                functor(typename Policy::work_tag(), i0, i1, i2, i3, result);
+            }
+          }
+        }
+      }
+    }
+
+    ParReduceCommon::memcpy_result(ptr, &result, sizeof(ValueType),
+                                   m_result_ptr_on_device);
+  }
+
+  template <int Rank, class ValueType>
+  inline std::enable_if_t<Rank == 5> execute_tile(const FunctorType& functor,
+                                                  const Policy& policy,
+                                                  pointer_type ptr) const {
+    const Index begin_0 = policy.m_lower[0];
+    const Index begin_1 = policy.m_lower[1];
+    const Index begin_2 = policy.m_lower[2];
+    const Index begin_3 = policy.m_lower[3];
+    const Index begin_4 = policy.m_lower[4];
+
+    const Index end_0 = policy.m_upper[0];
+    const Index end_1 = policy.m_upper[1];
+    const Index end_2 = policy.m_upper[2];
+    const Index end_3 = policy.m_upper[3];
+    const Index end_4 = policy.m_upper[4];
+
+    ValueType result = ValueType();
+
+    // FIXME_OPENMPTARGET: Unable to separate directives and their companion
+    // loops, which leads to code duplication for different reduction types.
+    if constexpr (UseReducer) {
+#pragma omp declare reduction(                                         \
+    custom:ValueType                                                   \
+    : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
+    initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(5) map(to         \
+                                                                 : functor) \
+    reduction(custom                                                        \
+              : result)
+      for (auto i0 = begin_0; i0 < end_0; ++i0) {
+        for (auto i1 = begin_1; i1 < end_1; ++i1) {
+          for (auto i2 = begin_2; i2 < end_2; ++i2) {
+            for (auto i3 = begin_3; i3 < end_3; ++i3) {
+              for (auto i4 = begin_4; i4 < end_4; ++i4) {
+                if constexpr (std::is_same<typename Policy::work_tag,
+                                           void>::value)
+                  functor(i0, i1, i2, i3, i4, result);
+                else
+                  functor(typename Policy::work_tag(), i0, i1, i2, i3, i4,
+                          result);
+              }
+            }
+          }
+        }
+      }
+    } else {
+#pragma omp target teams distribute parallel for collapse(5) map(to : functor) \
+reduction(+:result)
+      for (auto i0 = begin_0; i0 < end_0; ++i0) {
+        for (auto i1 = begin_1; i1 < end_1; ++i1) {
+          for (auto i2 = begin_2; i2 < end_2; ++i2) {
+            for (auto i3 = begin_3; i3 < end_3; ++i3) {
+              for (auto i4 = begin_4; i4 < end_4; ++i4) {
+                if constexpr (std::is_same<typename Policy::work_tag,
+                                           void>::value)
+                  functor(i0, i1, i2, i3, i4, result);
+                else
+                  functor(typename Policy::work_tag(), i0, i1, i2, i3, i4,
+                          result);
+              }
+            }
+          }
+        }
+      }
+    }
+
+    ParReduceCommon::memcpy_result(ptr, &result, sizeof(ValueType),
+                                   m_result_ptr_on_device);
+  }
+
+  template <int Rank, class ValueType>
+  inline std::enable_if_t<Rank == 6> execute_tile(const FunctorType& functor,
+                                                  const Policy& policy,
+                                                  pointer_type ptr) const {
+    const Index begin_0 = policy.m_lower[0];
+    const Index begin_1 = policy.m_lower[1];
+    const Index begin_2 = policy.m_lower[2];
+    const Index begin_3 = policy.m_lower[3];
+    const Index begin_4 = policy.m_lower[4];
+    const Index begin_5 = policy.m_lower[5];
+
+    const Index end_0 = policy.m_upper[0];
+    const Index end_1 = policy.m_upper[1];
+    const Index end_2 = policy.m_upper[2];
+    const Index end_3 = policy.m_upper[3];
+    const Index end_4 = policy.m_upper[4];
+    const Index end_5 = policy.m_upper[5];
+
+    ValueType result = ValueType();
+
+    // FIXME_OPENMPTARGET: Unable to separate directives and their companion
+    // loops, which leads to code duplication for different reduction types.
+    if constexpr (UseReducer) {
+#pragma omp declare reduction(                                         \
+    custom:ValueType                                                   \
+    : OpenMPTargetReducerWrapper <ReducerType>::join(omp_out, omp_in)) \
+    initializer(OpenMPTargetReducerWrapper <ReducerType>::init(omp_priv))
+
+#pragma omp target teams distribute parallel for collapse(6) map(to         \
+                                                                 : functor) \
+    reduction(custom                                                        \
+              : result)
+      for (auto i0 = begin_0; i0 < end_0; ++i0) {
+        for (auto i1 = begin_1; i1 < end_1; ++i1) {
+          for (auto i2 = begin_2; i2 < end_2; ++i2) {
+            for (auto i3 = begin_3; i3 < end_3; ++i3) {
+              for (auto i4 = begin_4; i4 < end_4; ++i4) {
+                for (auto i5 = begin_5; i5 < end_5; ++i5) {
+                  if constexpr (std::is_same<typename Policy::work_tag,
+                                             void>::value)
+                    functor(i0, i1, i2, i3, i4, i5, result);
+                  else
+                    functor(typename Policy::work_tag(), i0, i1, i2, i3, i4, i5,
+                            result);
+                }
+              }
+            }
+          }
+        }
+      }
+    } else {
+#pragma omp target teams distribute parallel for collapse(6) map(to : functor) \
+reduction(+:result)
+      for (auto i0 = begin_0; i0 < end_0; ++i0) {
+        for (auto i1 = begin_1; i1 < end_1; ++i1) {
+          for (auto i2 = begin_2; i2 < end_2; ++i2) {
+            for (auto i3 = begin_3; i3 < end_3; ++i3) {
+              for (auto i4 = begin_4; i4 < end_4; ++i4) {
+                for (auto i5 = begin_5; i5 < end_5; ++i5) {
+                  if constexpr (std::is_same<typename Policy::work_tag,
+                                             void>::value)
+                    functor(i0, i1, i2, i3, i4, i5, result);
+                  else
+                    functor(typename Policy::work_tag(), i0, i1, i2, i3, i4, i5,
+                            result);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+
+    ParReduceCommon::memcpy_result(ptr, &result, sizeof(ValueType),
+                                   m_result_ptr_on_device);
+  }
+
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy&, const Functor&) {
+    return 256;
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+#undef KOKKOS_IMPL_MDRANGE_USE_NO_TILES
+#endif /* KOKKOS_OPENMPTARGET_PARALLEL_MDRANGE_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Task.cpp b/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Task.cpp
new file mode 100644 (file)
index 0000000..1ada2b1
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+
+#if defined(KOKKOS_ENABLE_OPENMPTARGET) && defined(KOKKOS_ENABLE_TASKPOLICY)
+
+#include <impl/Kokkos_TaskQueue_impl.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template class TaskQueue<Kokkos::Experimental::OpenMPTarget>;
+
+//----------------------------------------------------------------------------
+
+TaskExec<Kokkos::Experimental::OpenMPTarget>::TaskExec()
+    : m_self_exec(0),
+      m_team_exec(0),
+      m_sync_mask(0),
+      m_sync_value(0),
+      m_sync_step(0),
+      m_group_rank(0),
+      m_team_rank(0),
+      m_team_size(1) {}
+
+TaskExec<Kokkos::Experimental::OpenMPTarget>::TaskExec(
+    Kokkos::Impl::OpenMPTargetExec &arg_exec, int const arg_team_size)
+    : m_self_exec(&arg_exec),
+      m_team_exec(arg_exec.pool_rev(arg_exec.pool_rank_rev() / arg_team_size)),
+      m_sync_mask(0),
+      m_sync_value(0),
+      m_sync_step(0),
+      m_group_rank(arg_exec.pool_rank_rev() / arg_team_size),
+      m_team_rank(arg_exec.pool_rank_rev() % arg_team_size),
+      m_team_size(arg_team_size) {
+  // This team spans
+  //    m_self_exec->pool_rev( team_size * group_rank )
+  //    m_self_exec->pool_rev( team_size * ( group_rank + 1 ) - 1 )
+
+  int64_t volatile *const sync = (int64_t *)m_self_exec->scratch_reduce();
+
+  sync[0] = int64_t(0);
+  sync[1] = int64_t(0);
+
+  for (int i = 0; i < m_team_size; ++i) {
+    m_sync_value |= int64_t(1) << (8 * i);
+    m_sync_mask |= int64_t(3) << (8 * i);
+  }
+
+  Kokkos::memory_fence();
+}
+
+void TaskExec<Kokkos::Experimental::OpenMPTarget>::team_barrier_impl() const {
+  if (m_team_exec->scratch_reduce_size() < int(2 * sizeof(int64_t))) {
+    Kokkos::abort("TaskQueue<OpenMPTarget> scratch_reduce memory too small");
+  }
+
+  // Use team shared memory to synchronize.
+  // Alternate memory locations between barriers to avoid a sequence
+  // of barriers overtaking one another.
+
+  int64_t volatile *const sync =
+      ((int64_t *)m_team_exec->scratch_reduce()) + (m_sync_step & 0x01);
+
+  // This team member sets one byte within the sync variable
+  int8_t volatile *const sync_self = ((int8_t *)sync) + m_team_rank;
+
+  *sync_self = int8_t(m_sync_value & 0x03);  // signal arrival
+
+  while (m_sync_value != *sync)
+    ;  // wait for team to arrive
+
+  ++m_sync_step;
+
+  if (0 == (0x01 & m_sync_step)) {  // Every other step
+    m_sync_value ^= m_sync_mask;
+    if (1000 < m_sync_step) m_sync_step = 0;
+  }
+}
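+
+// Worked example (derived from the code above, for team_size == 2): the
+// constructor sets m_sync_value = 0x0101 and m_sync_mask = 0x0303, i.e. one
+// byte per team member with two usable bits each.  A member stores the low
+// two bits of its m_sync_value into its own byte and spins until the whole
+// word equals m_sync_value; every other step the expected value is flipped
+// with the mask (0x0101 -> 0x0202 -> 0x0101 ...), so consecutive barriers
+// cannot be confused with one another.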
+
+//----------------------------------------------------------------------------
+
+void TaskQueueSpecialization<Kokkos::Experimental::OpenMPTarget>::execute(
+    TaskQueue<Kokkos::Experimental::OpenMPTarget> *const queue) {
+  using execution_space = Kokkos::Experimental::OpenMPTarget;
+  using queue_type      = TaskQueue<execution_space>;
+  using task_root_type  = TaskBase<execution_space, void, void>;
+  using PoolExec        = Kokkos::Impl::OpenMPTargetExec;
+  using Member          = TaskExec<execution_space>;
+
+  task_root_type *const end = (task_root_type *)task_root_type::EndTag;
+
+  // Required:  team_size <= 8
+
+  const int team_size = PoolExec::pool_size(2);  // Threads per core
+  // const int team_size = PoolExec::pool_size(1); // Threads per NUMA
+
+  if (8 < team_size) {
+    Kokkos::abort("TaskQueue<OpenMPTarget> unsupported team size");
+  }
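+
+  // (The limit follows from the barrier implementation: every team member
+  //  owns one byte of a single 64-bit synchronization word, so at most 8
+  //  members fit.)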
+
+#pragma omp parallel
+  {
+    PoolExec &self = *PoolExec::get_thread_omp();
+
+    Member single_exec;
+    Member team_exec(self, team_size);
+
+    // Team shared memory
+    task_root_type *volatile *const task_shared =
+        (task_root_type **)team_exec.m_team_exec->scratch_thread();
+
+// Barrier across the entire OpenMPTarget thread pool to ensure initialization
+#pragma omp barrier
+
+    // Loop until all queues are empty and no tasks in flight
+
+    do {
+      task_root_type *task = 0;
+
+      // Each team lead attempts to acquire either a thread team task
+      // or a single thread task for the team.
+
+      if (0 == team_exec.team_rank()) {
+        task = 0 < *((volatile int *)&queue->m_ready_count) ? end : 0;
+
+        // Loop by priority and then type
+        for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
+          for (int j = 0; j < 2 && end == task; ++j) {
+            task = queue_type::pop_task(&queue->m_ready[i][j]);
+          }
+        }
+      }
+
+      // Team lead broadcast acquired task to team members:
+
+      if (1 < team_exec.team_size()) {
+        if (0 == team_exec.team_rank()) *task_shared = task;
+
+        // Fence to be sure task_shared is stored before the barrier
+        Kokkos::memory_fence();
+
+        // Whole team waits for every team member to reach this statement
+        team_exec.team_barrier();
+
+        // Fence to be sure task_shared is stored
+        Kokkos::memory_fence();
+
+        task = *task_shared;
+      }
+
+      if (0 == task) break;  // 0 == m_ready_count
+
+      if (end == task) {
+        // All team members wait for the whole team to reach this statement.
+        // This is necessary to prevent task_shared from being updated
+        // before it is read by all threads.
+        team_exec.team_barrier();
+      } else if (task_root_type::TaskTeam == task->m_task_type) {
+        // Thread Team Task
+        (*task->m_apply)(task, &team_exec);
+
+        // The m_apply function performs a barrier
+
+        if (0 == team_exec.team_rank()) {
+          // team member #0 completes the task, which may delete the task
+          queue->complete(task);
+        }
+      } else {
+        // Single Thread Task
+
+        if (0 == team_exec.team_rank()) {
+          (*task->m_apply)(task, &single_exec);
+
+          queue->complete(task);
+        }
+
+        // All team members wait for the whole team to reach this statement.
+        // This is not needed to complete the task, but it is necessary to
+        // prevent task_shared from being updated before it is read by all
+        // threads.
+        team_exec.team_barrier();
+      }
+    } while (1);
+  }
+  // END #pragma omp parallel
+}
+
+void TaskQueueSpecialization<Kokkos::Experimental::OpenMPTarget>::
+    iff_single_thread_recursive_execute(
+        TaskQueue<Kokkos::Experimental::OpenMPTarget> *const queue) {
+  using execution_space = Kokkos::Experimental::OpenMPTarget;
+  using queue_type      = TaskQueue<execution_space>;
+  using task_root_type  = TaskBase<execution_space, void, void>;
+  using Member          = TaskExec<execution_space>;
+
+  if (1 == omp_get_num_threads()) {
+    task_root_type *const end = (task_root_type *)task_root_type::EndTag;
+
+    Member single_exec;
+
+    task_root_type *task = end;
+
+    do {
+      task = end;
+
+      // Loop by priority and then type
+      for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
+        for (int j = 0; j < 2 && end == task; ++j) {
+          task = queue_type::pop_task(&queue->m_ready[i][j]);
+        }
+      }
+
+      if (end == task) break;
+
+      (*task->m_apply)(task, &single_exec);
+
+      queue->complete(task);
+
+    } while (1);
+  }
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_OPENMPTARGET ) && defined( \
+          KOKKOS_ENABLE_TASKPOLICY ) */
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Task.hpp b/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_Task.hpp
new file mode 100644 (file)
index 0000000..f7ba01d
--- /dev/null
@@ -0,0 +1,347 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_OPENMP_TASK_HPP
+#define KOKKOS_IMPL_OPENMP_TASK_HPP
+
+#if defined(KOKKOS_ENABLE_TASKPOLICY)
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <>
+class TaskQueueSpecialization<Kokkos::Experimental::OpenMPTarget> {
+ public:
+  using execution_space = Kokkos::Experimental::OpenMPTarget;
+  using queue_type      = Kokkos::Impl::TaskQueue<execution_space>;
+  using task_base_type  = Kokkos::Impl::TaskBase<execution_space, void, void>;
+
+  // Must specify memory space
+  using memory_space = Kokkos::HostSpace;
+
+  static void iff_single_thread_recursive_execute(queue_type* const);
+
+  // Must provide task queue execution function
+  static void execute(queue_type* const);
+
+  // Must provide mechanism to set function pointer in
+  // execution space from the host process.
+  template <typename FunctorType>
+  static void proc_set_apply(task_base_type::function_type* ptr) {
+    using TaskType = TaskBase<Kokkos::Experimental::OpenMPTarget,
+                              typename FunctorType::value_type, FunctorType>;
+    *ptr           = TaskType::apply;
+  }
+};
+
+extern template class TaskQueue<Kokkos::Experimental::OpenMPTarget>;
+
+//----------------------------------------------------------------------------
+
+template <>
+class TaskExec<Kokkos::Experimental::OpenMPTarget> {
+ private:
+  TaskExec(TaskExec&&)      = delete;
+  TaskExec(TaskExec const&) = delete;
+  TaskExec& operator=(TaskExec&&) = delete;
+  TaskExec& operator=(TaskExec const&) = delete;
+
+  using PoolExec = Kokkos::Impl::OpenMPTargetExec;
+
+  friend class Kokkos::Impl::TaskQueue<Kokkos::Experimental::OpenMPTarget>;
+  friend class Kokkos::Impl::TaskQueueSpecialization<
+      Kokkos::Experimental::OpenMPTarget>;
+
+  PoolExec* const m_self_exec;  ///< This thread's thread pool data structure
+  PoolExec* const m_team_exec;  ///< Team thread's thread pool data structure
+  int64_t m_sync_mask;
+  int64_t mutable m_sync_value;
+  int mutable m_sync_step;
+  int m_group_rank;  ///< Which "team" subset of thread pool
+  int m_team_rank;   ///< Which thread within a team
+  int m_team_size;
+
+  TaskExec();
+  TaskExec(PoolExec& arg_exec, int arg_team_size);
+
+  void team_barrier_impl() const;
+
+ public:
+  KOKKOS_FUNCTION void* team_shared() const {
+    KOKKOS_IF_ON_HOST(
+        (return m_team_exec ? m_team_exec->scratch_thread() : nullptr;))
+
+    KOKKOS_IF_ON_DEVICE((return nullptr;))
+  }
+
+  KOKKOS_FUNCTION int team_shared_size() const {
+    KOKKOS_IF_ON_HOST(
+        (return m_team_exec ? m_team_exec->scratch_thread_size() : 0;))
+
+    KOKKOS_IF_ON_DEVICE((return 0;))
+  }
+
+  /**\brief  The whole team enters this function call
+   *         before any team member returns from
+   *         this function call.
+   */
+  KOKKOS_FUNCTION void team_barrier() const {
+    KOKKOS_IF_ON_HOST((if (1 < m_team_size) { team_barrier_impl(); }))
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int team_rank() const { return m_team_rank; }
+
+  KOKKOS_INLINE_FUNCTION
+  int team_size() const { return m_team_size; }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >
+TeamThreadRange(Impl::TaskExec<Kokkos::Experimental::OpenMPTarget>& thread,
+                const iType& count) {
+  return Impl::TeamThreadRangeBoundariesStruct<
+      iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >(thread,
+                                                                  count);
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >
+TeamThreadRange(Impl::TaskExec<Kokkos::Experimental::OpenMPTarget>& thread,
+                const iType& start, const iType& end) {
+  return Impl::TeamThreadRangeBoundariesStruct<
+      iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >(thread, start,
+                                                                  end);
+}
+
+/** \brief  Inter-thread parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
+        loop_boundaries,
+    const Lambda& lambda) {
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i);
+  }
+}
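+
+// Usage sketch (hypothetical, illustrative only) for the overload above,
+// called from inside a task body; `member`, `n`, `out`, and `in` are
+// assumptions:
+//
+//   Kokkos::parallel_for(Kokkos::TeamThreadRange(member, n),
+//                        [&](const int i) { out(i) = in(i); });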
+
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
+        loop_boundaries,
+    const Lambda& lambda, ValueType& initialized_result) {
+  int team_rank =
+      loop_boundaries.thread.team_rank();  // member num within the team
+  ValueType result = initialized_result;
+
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, result);
+  }
+
+  if (1 < loop_boundaries.thread.team_size()) {
+    ValueType* shared = (ValueType*)loop_boundaries.thread.team_shared();
+
+    loop_boundaries.thread.team_barrier();
+    shared[team_rank] = result;
+
+    loop_boundaries.thread.team_barrier();
+
+    // reduce across threads to thread 0
+    if (team_rank == 0) {
+      for (int i = 1; i < loop_boundaries.thread.team_size(); i++) {
+        shared[0] += shared[i];
+      }
+    }
+
+    loop_boundaries.thread.team_barrier();
+
+    // broadcast result
+    initialized_result = shared[0];
+  } else {
+    initialized_result = result;
+  }
+}
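+
+// Usage sketch (hypothetical, illustrative only): sums i over [0, n) across
+// the team via the shared-memory reduction above; `member` and `n` are
+// assumptions.
+//
+//   int total = 0;
+//   Kokkos::parallel_reduce(Kokkos::TeamThreadRange(member, n),
+//                           [&](const int i, int& sum) { sum += i; }, total);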
+
+template <typename iType, class Lambda, typename ValueType, class JoinType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
+        loop_boundaries,
+    const Lambda& lambda, const JoinType& join, ValueType& initialized_result) {
+  int team_rank =
+      loop_boundaries.thread.team_rank();  // member num within the team
+  ValueType result = initialized_result;
+
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, result);
+  }
+
+  if (1 < loop_boundaries.thread.team_size()) {
+    ValueType* shared = (ValueType*)loop_boundaries.thread.team_shared();
+
+    loop_boundaries.thread.team_barrier();
+    shared[team_rank] = result;
+
+    loop_boundaries.thread.team_barrier();
+
+    // reduce across threads to thread 0
+    if (team_rank == 0) {
+      for (int i = 1; i < loop_boundaries.thread.team_size(); i++) {
+        join(shared[0], shared[i]);
+      }
+    }
+
+    loop_boundaries.thread.team_barrier();
+
+    // broadcast result
+    initialized_result = shared[0];
+  } else {
+    initialized_result = result;
+  }
+}
+
+// placeholder for future function
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
+        loop_boundaries,
+    const Lambda& lambda, ValueType& initialized_result) {}
+
+// placeholder for future function
+template <typename iType, class Lambda, typename ValueType, class JoinType>
+KOKKOS_INLINE_FUNCTION void parallel_reduce(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
+        loop_boundaries,
+    const Lambda& lambda, const JoinType& join, ValueType& initialized_result) {
+}
+
+template <typename ValueType, typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
+        loop_boundaries,
+    const Lambda& lambda) {
+  ValueType accum = 0;
+  ValueType val, local_total;
+  ValueType* shared = (ValueType*)loop_boundaries.thread.team_shared();
+  int team_size     = loop_boundaries.thread.team_size();
+  int team_rank =
+      loop_boundaries.thread.team_rank();  // member num within the team
+
+  // Intra-member scan
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    local_total = 0;
+    lambda(i, local_total, false);
+    val = accum;
+    lambda(i, val, true);
+    accum += local_total;
+  }
+
+  shared[team_rank] = accum;
+  loop_boundaries.thread.team_barrier();
+
+  // Member 0 does a scan over the accumulated totals
+  if (team_rank == 0) {
+    for (iType i = 1; i < team_size; i += 1) {
+      shared[i] += shared[i - 1];
+    }
+    accum = 0;  // Member 0 sets accum to 0 in preparation for inter-member scan
+  }
+
+  loop_boundaries.thread.team_barrier();
+
+  // Inter-member scan adding in accumulated totals
+  if (team_rank != 0) {
+    accum = shared[team_rank - 1];
+  }
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    local_total = 0;
+    lambda(i, local_total, false);
+    val = accum;
+    lambda(i, val, true);
+    accum += local_total;
+  }
+}
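+
+// Worked example (derived from the implementation above): a team of 2, each
+// member owning two indices that contribute 1 apiece.  Pass 1 yields the
+// per-member totals accum == 2, so shared becomes {2, 2}; member 0 turns it
+// into the inclusive scan {2, 4}.  Member 1 then restarts from accum ==
+// shared[0] == 2, and the final == true calls therefore see the exclusive
+// prefixes 0, 1 (member 0) and 2, 3 (member 1).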
+
+// placeholder for future function
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::TaskExec<Kokkos::Experimental::OpenMPTarget> >&
+        loop_boundaries,
+    const Lambda& lambda) {}
+
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKPOLICY ) */
+#endif /* #ifndef KOKKOS_IMPL_OPENMP_TASK_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_UniqueToken.hpp b/bundled/kokkos-3.7.00/core/src/OpenMPTarget/Kokkos_OpenMPTarget_UniqueToken.hpp
new file mode 100644 (file)
index 0000000..fa34861
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMPTARGET_UNIQUE_TOKEN_HPP
+#define KOKKOS_OPENMPTARGET_UNIQUE_TOKEN_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+
+#include <Kokkos_OpenMPTargetSpace.hpp>
+#include <Kokkos_UniqueToken.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+#include <impl/Kokkos_ConcurrentBitset.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+// Both global- and instance-scope UniqueTokens are implemented in the same way
+template <>
+class UniqueToken<OpenMPTarget, UniqueTokenScope::Global> {
+ protected:
+  uint32_t volatile* m_buffer;
+  uint32_t m_count;
+
+ public:
+  using execution_space = OpenMPTarget;
+  using size_type       = int32_t;
+
+  explicit UniqueToken(execution_space const& = execution_space());
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken(const UniqueToken&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken(UniqueToken&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken& operator=(const UniqueToken&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken& operator=(UniqueToken&&) = default;
+
+  /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  size_type size() const noexcept { return m_count; }
+
+  /// \brief acquire value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  size_type acquire() const {
+    const Kokkos::pair<int, int> result =
+        Kokkos::Impl::concurrent_bitset::acquire_bounded(
+            m_buffer, m_count, Kokkos::Impl::clock_tic() % m_count);
+
+    if (result.first < 0) {
+      Kokkos::abort(
+          "UniqueToken<OpenMPTarget> failure to acquire tokens, no tokens "
+          "available");
+    }
+
+    return result.first;
+  }
+
+  /// \brief release an acquired value
+  KOKKOS_INLINE_FUNCTION
+  void release(size_type i) const noexcept {
+    Kokkos::Impl::concurrent_bitset::release(m_buffer, i);
+  }
+};
+
+template <>
+class UniqueToken<OpenMPTarget, UniqueTokenScope::Instance>
+    : public UniqueToken<OpenMPTarget, UniqueTokenScope::Global> {
+ private:
+  Kokkos::View<uint32_t*, ::Kokkos::Experimental::OpenMPTargetSpace>
+      m_buffer_view;
+
+ public:
+  explicit UniqueToken(execution_space const& arg = execution_space())
+      : UniqueToken<OpenMPTarget, UniqueTokenScope::Global>(arg) {}
+
+  UniqueToken(size_type max_size, execution_space const& = execution_space())
+      : m_buffer_view(
+            "Kokkos::UniqueToken::m_buffer_view",
+            ::Kokkos::Impl::concurrent_bitset::buffer_bound(max_size)) {
+    m_buffer = m_buffer_view.data();
+    m_count  = max_size;
+  }
+};
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif  // KOKKOS_ENABLE_OPENMPTARGET
+#endif  // KOKKOS_OPENMPTARGET_UNIQUE_TOKEN_HPP
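A minimal sketch of the acquire/release protocol the classes above implement (the view name and sizes are hypothetical; the token is captured by value, which the defaulted copy constructor permits):

  Kokkos::Experimental::UniqueToken<Kokkos::Experimental::OpenMPTarget> token;
  Kokkos::View<double*> slots("slots", token.size());
  const int n = 1 << 20;
  Kokkos::parallel_for(
      "use_tokens", n, KOKKOS_LAMBDA(const int i) {
        const int id = token.acquire();  // 0 <= id < token.size()
        slots(id) += 1.0;                // exclusive use of slot id
        token.release(id);               // return the token to the pool
      });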
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL.cpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL.cpp
new file mode 100644 (file)
index 0000000..840db43
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Concepts.hpp>
+#include <SYCL/Kokkos_SYCL_Instance.hpp>
+#include <Kokkos_SYCL.hpp>
+#include <Kokkos_HostSpace.hpp>
+#include <Kokkos_Serial.hpp>
+#include <Kokkos_Core.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_DeviceManagement.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+namespace {
+template <typename C>
+struct Container {
+  explicit Container(const C& c) : container(c) {}
+
+  friend std::ostream& operator<<(std::ostream& os, const Container& that) {
+    os << that.container.size();
+    for (const auto& v : that.container) {
+      os << "\n\t" << v;
+    }
+    return os;
+  }
+
+ private:
+  const C& container;
+};
+}  // namespace
+
+namespace Kokkos {
+namespace Experimental {
+SYCL::SYCL()
+    : m_space_instance(&Impl::SYCLInternal::singleton(),
+                       [](Impl::SYCLInternal*) {}) {
+  Impl::SYCLInternal::singleton().verify_is_initialized(
+      "SYCL instance constructor");
+}
+
+SYCL::SYCL(const sycl::queue& stream)
+    : m_space_instance(new Impl::SYCLInternal, [](Impl::SYCLInternal* ptr) {
+        ptr->finalize();
+        delete ptr;
+      }) {
+  Impl::SYCLInternal::singleton().verify_is_initialized(
+      "SYCL instance constructor");
+  m_space_instance->initialize(stream);
+}
+
+int SYCL::concurrency() {
+  return Impl::SYCLInternal::singleton().m_maxConcurrency;
+}
+
+const char* SYCL::name() { return "SYCL"; }
+
+bool SYCL::impl_is_initialized() {
+  return Impl::SYCLInternal::singleton().is_initialized();
+}
+
+void SYCL::impl_finalize() { Impl::SYCLInternal::singleton().finalize(); }
+
+void SYCL::print_configuration(std::ostream& os, bool verbose) const {
+  os << "Devices:\n";
+  os << "  KOKKOS_ENABLE_SYCL: yes\n";
+
+  os << "\nRuntime Configuration:\n";
+
+  os << "macro  KOKKOS_ENABLE_SYCL : defined\n";
+  if (verbose)
+    SYCL::impl_sycl_info(os, m_space_instance->m_queue->get_device());
+}
+
+void SYCL::fence(const std::string& name) const {
+  Impl::SYCLInternal::fence(*m_space_instance->m_queue, name,
+                            impl_instance_id());
+}
+
+void SYCL::impl_static_fence(const std::string& name) {
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<
+      Kokkos::Experimental::SYCL>(
+      name,
+      Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+          GlobalDeviceSynchronization,
+      [&]() {
+        // guard accessing all_queues
+        std::scoped_lock lock(Impl::SYCLInternal::mutex);
+        for (auto& queue : Impl::SYCLInternal::all_queues) {
+          try {
+            (*queue)->wait_and_throw();
+          } catch (sycl::exception const& e) {
+            Kokkos::Impl::throw_runtime_exception(
+                std::string("There was a synchronous SYCL error:\n") +=
+                e.what());
+          }
+        }
+      });
+}
+
+int SYCL::sycl_device() const {
+  return impl_internal_space_instance()->m_syclDev;
+}
+
+void SYCL::impl_initialize(InitializationSettings const& settings) {
+  std::vector<sycl::device> gpu_devices =
+      sycl::device::get_devices(sycl::info::device_type::gpu);
+  // If the device id is not specified, no GPU architecture is targeted, and
+  // there are no GPUs, sidestep Kokkos device selection and use whatever
+  // device is available.
+#if !defined(KOKKOS_ARCH_INTEL_GPU) && !defined(KOKKOS_ARCH_KEPLER) && \
+    !defined(KOKKOS_ARCH_MAXWELL) && !defined(KOKKOS_ARCH_PASCAL) &&   \
+    !defined(KOKKOS_ARCH_VOLTA) && !defined(KOKKOS_ARCH_TURING75) &&   \
+    !defined(KOKKOS_ARCH_AMPERE)
+  if (!settings.has_device_id() && gpu_devices.empty()) {
+    Impl::SYCLInternal::singleton().initialize(sycl::device());
+    return;
+  }
+#endif
+  using Kokkos::Impl::get_gpu;
+  Impl::SYCLInternal::singleton().initialize(gpu_devices[get_gpu(settings)]);
+}
+
+std::ostream& SYCL::impl_sycl_info(std::ostream& os,
+                                   const sycl::device& device) {
+  using namespace sycl::info;
+  return os << "Name: " << device.get_info<device::name>()
+            << "\nDriver Version: " << device.get_info<device::driver_version>()
+            << "\nIs Host: " << device.is_host()
+            << "\nIs CPU: " << device.is_cpu()
+            << "\nIs GPU: " << device.is_gpu()
+            << "\nIs Accelerator: " << device.is_accelerator()
+            << "\nVendor Id: " << device.get_info<device::vendor_id>()
+            << "\nMax Compute Units: "
+            << device.get_info<device::max_compute_units>()
+            << "\nMax Work Item Dimensions: "
+            << device.get_info<device::max_work_item_dimensions>()
+            << "\nMax Work Group Size: "
+            << device.get_info<device::max_work_group_size>()
+            << "\nPreferred Vector Width Char: "
+            << device.get_info<device::preferred_vector_width_char>()
+            << "\nPreferred Vector Width Short: "
+            << device.get_info<device::preferred_vector_width_short>()
+            << "\nPreferred Vector Width Int: "
+            << device.get_info<device::preferred_vector_width_int>()
+            << "\nPreferred Vector Width Long: "
+            << device.get_info<device::preferred_vector_width_long>()
+            << "\nPreferred Vector Width Float: "
+            << device.get_info<device::preferred_vector_width_float>()
+            << "\nPreferred Vector Width Double: "
+            << device.get_info<device::preferred_vector_width_double>()
+            << "\nPreferred Vector Width Half: "
+            << device.get_info<device::preferred_vector_width_half>()
+            << "\nNative Vector Width Char: "
+            << device.get_info<device::native_vector_width_char>()
+            << "\nNative Vector Width Short: "
+            << device.get_info<device::native_vector_width_short>()
+            << "\nNative Vector Width Int: "
+            << device.get_info<device::native_vector_width_int>()
+            << "\nNative Vector Width Long: "
+            << device.get_info<device::native_vector_width_long>()
+            << "\nNative Vector Width Float: "
+            << device.get_info<device::native_vector_width_float>()
+            << "\nNative Vector Width Double: "
+            << device.get_info<device::native_vector_width_double>()
+            << "\nNative Vector Width Half: "
+            << device.get_info<device::native_vector_width_half>()
+            << "\nAddress Bits: " << device.get_info<device::address_bits>()
+            << "\nImage Support: " << device.get_info<device::image_support>()
+            << "\nMax Mem Alloc Size: "
+            << device.get_info<device::max_mem_alloc_size>()
+            << "\nMax Read Image Args: "
+            << device.get_info<device::max_read_image_args>()
+            << "\nImage2d Max Width: "
+            << device.get_info<device::image2d_max_width>()
+            << "\nImage2d Max Height: "
+            << device.get_info<device::image2d_max_height>()
+            << "\nImage3d Max Width: "
+            << device.get_info<device::image3d_max_width>()
+            << "\nImage3d Max Height: "
+            << device.get_info<device::image3d_max_height>()
+            << "\nImage3d Max Depth: "
+            << device.get_info<device::image3d_max_depth>()
+            << "\nImage Max Buffer Size: "
+            << device.get_info<device::image_max_buffer_size>()
+            << "\nImage Max Array Size: "
+            << device.get_info<device::image_max_array_size>()
+            << "\nMax Samplers: " << device.get_info<device::max_samplers>()
+            << "\nMax Parameter Size: "
+            << device.get_info<device::max_parameter_size>()
+            << "\nMem Base Addr Align: "
+            << device.get_info<device::mem_base_addr_align>()
+            << "\nGlobal Cache Mem Line Size: "
+            << device.get_info<device::global_mem_cache_line_size>()
+            << "\nGlobal Mem Cache Size: "
+            << device.get_info<device::global_mem_cache_size>()
+            << "\nGlobal Mem Size: "
+            << device.get_info<device::global_mem_size>()
+            << "\nLocal Mem Size: " << device.get_info<device::local_mem_size>()
+            << "\nError Correction Support: "
+            << device.get_info<device::error_correction_support>()
+            << "\nHost Unified Memory: "
+            << device.get_info<device::host_unified_memory>()
+            << "\nProfiling Timer Resolution: "
+            << device.get_info<device::profiling_timer_resolution>()
+            << "\nIs Endian Little: "
+            << device.get_info<device::is_endian_little>()
+            << "\nIs Available: " << device.get_info<device::is_available>()
+            << "\nIs Compiler Available: "
+            << device.get_info<device::is_compiler_available>()
+            << "\nIs Linker Available: "
+            << device.get_info<device::is_linker_available>()
+            << "\nQueue Profiling: "
+            << device.get_info<device::queue_profiling>()
+            << "\nVendor: " << device.get_info<device::vendor>()
+            << "\nProfile: " << device.get_info<device::profile>()
+            << "\nVersion: " << device.get_info<device::version>()
+            << "\nPrintf Buffer Size: "
+            << device.get_info<device::printf_buffer_size>()
+            << "\nPreferred Interop User Sync: "
+            << device.get_info<device::preferred_interop_user_sync>()
+            << "\nPartition Max Sub Devices: "
+            << device.get_info<device::partition_max_sub_devices>()
+            << "\nReference Count: "
+            << device.get_info<device::reference_count>() << '\n';
+}
+
+namespace Impl {
+
+int g_sycl_space_factory_initialized =
+    Kokkos::Impl::initialize_space_factory<SYCL>("170_SYCL");
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
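A minimal sketch of the two construction paths defined above, assuming a SYCL 2020 implementation (the queue setup is hypothetical): the default constructor aliases the singleton through a no-op deleter, while passing an existing sycl::queue creates an independently finalized instance.

  #include <Kokkos_Core.hpp>
  #include <CL/sycl.hpp>

  void make_instances() {
    Kokkos::Experimental::SYCL def;        // backed by the singleton
    sycl::queue q{sycl::gpu_selector_v};   // user-managed queue
    Kokkos::Experimental::SYCL mine(q);    // wraps q via SYCLInternal
    mine.fence("wait on user queue");      // fences q, not the singleton
  }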
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Abort.hpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Abort.hpp
new file mode 100644 (file)
index 0000000..e376f01
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SYCL_ABORT_HPP
+#define KOKKOS_SYCL_ABORT_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_SYCL)
+#include <CL/sycl.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+inline void sycl_abort(char const* msg) {
+#ifdef NDEBUG
+  KOKKOS_IMPL_DO_NOT_USE_PRINTF("Aborting with message %s.\n", msg);
+#else
+  // Choosing "" here causes problems, but a single whitespace character works.
+  const char* empty = " ";
+  __assert_fail(msg, empty, 0, empty);
+#endif
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
+#endif
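A minimal sketch of how the helper above is reached from device code (the view v and the predicate are hypothetical): Kokkos::abort forwards to Kokkos::Impl::sycl_abort on this backend, printing the message in NDEBUG builds and calling __assert_fail otherwise.

  Kokkos::parallel_for(
      "validate", v.extent(0), KOKKOS_LAMBDA(const int i) {
        if (v(i) < 0) Kokkos::abort("negative entry");  // ends in sycl_abort
      });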
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_DeepCopy.hpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_DeepCopy.hpp
new file mode 100644 (file)
index 0000000..160f606
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SYCLDEEPCOPY_HPP
+#define KOKKOS_SYCLDEEPCOPY_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_SYCL.hpp>
+
+#include <vector>
+
+#ifdef KOKKOS_ENABLE_SYCL
+
+namespace Kokkos {
+namespace Impl {
+
+template <class DT, class... DP>
+struct ZeroMemset<Kokkos::Experimental::SYCL, DT, DP...> {
+  ZeroMemset(const Kokkos::Experimental::SYCL& exec_space,
+             const View<DT, DP...>& dst,
+             typename View<DT, DP...>::const_value_type&) {
+    auto event = exec_space.impl_internal_space_instance()->m_queue->memset(
+        dst.data(), 0,
+        dst.size() * sizeof(typename View<DT, DP...>::value_type));
+    exec_space.impl_internal_space_instance()
+        ->m_queue->ext_oneapi_submit_barrier(std::vector<sycl::event>{event});
+  }
+
+  ZeroMemset(const View<DT, DP...>& dst,
+             typename View<DT, DP...>::const_value_type&) {
+    Experimental::Impl::SYCLInternal::singleton().m_queue->memset(
+        dst.data(), 0,
+        dst.size() * sizeof(typename View<DT, DP...>::value_type));
+  }
+};
+
+void DeepCopySYCL(void* dst, const void* src, size_t n);
+void DeepCopyAsyncSYCL(const Kokkos::Experimental::SYCL& instance, void* dst,
+                       const void* src, size_t n);
+void DeepCopyAsyncSYCL(void* dst, const void* src, size_t n);
+
+template <class MemSpace>
+struct DeepCopy<MemSpace, HostSpace, Kokkos::Experimental::SYCL,
+                std::enable_if_t<is_sycl_type_space<MemSpace>::value>> {
+  DeepCopy(void* dst, const void* src, size_t n) { DeepCopySYCL(dst, src, n); }
+  DeepCopy(const Kokkos::Experimental::SYCL& instance, void* dst,
+           const void* src, size_t n) {
+    DeepCopyAsyncSYCL(instance, dst, src, n);
+  }
+};
+
+template <class MemSpace>
+struct DeepCopy<HostSpace, MemSpace, Kokkos::Experimental::SYCL,
+                std::enable_if_t<is_sycl_type_space<MemSpace>::value>> {
+  DeepCopy(void* dst, const void* src, size_t n) { DeepCopySYCL(dst, src, n); }
+  DeepCopy(const Kokkos::Experimental::SYCL& instance, void* dst,
+           const void* src, size_t n) {
+    DeepCopyAsyncSYCL(instance, dst, src, n);
+  }
+};
+
+template <class MemSpace1, class MemSpace2>
+struct DeepCopy<MemSpace1, MemSpace2, Kokkos::Experimental::SYCL,
+                std::enable_if_t<is_sycl_type_space<MemSpace1>::value &&
+                                 is_sycl_type_space<MemSpace2>::value>> {
+  DeepCopy(void* dst, const void* src, size_t n) { DeepCopySYCL(dst, src, n); }
+  DeepCopy(const Kokkos::Experimental::SYCL& instance, void* dst,
+           const void* src, size_t n) {
+    DeepCopyAsyncSYCL(instance, dst, src, n);
+  }
+};
+
+template <class MemSpace1, class MemSpace2, class ExecutionSpace>
+struct DeepCopy<
+    MemSpace1, MemSpace2, ExecutionSpace,
+    std::enable_if_t<
+        is_sycl_type_space<MemSpace1>::value &&
+        is_sycl_type_space<MemSpace2>::value &&
+        !std::is_same<ExecutionSpace, Kokkos::Experimental::SYCL>::value>> {
+  inline DeepCopy(void* dst, const void* src, size_t n) {
+    DeepCopySYCL(dst, src, n);
+  }
+
+  inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+                  size_t n) {
+    exec.fence(fence_string());
+    DeepCopyAsyncSYCL(dst, src, n);
+  }
+
+ private:
+  static const std::string& fence_string() {
+    static const std::string string =
+        std::string("Kokkos::Impl::DeepCopy<") + MemSpace1::name() + "Space, " +
+        MemSpace2::name() +
+        "Space, ExecutionSpace>::DeepCopy: fence before copy";
+    return string;
+  }
+};
+
+template <class MemSpace, class ExecutionSpace>
+struct DeepCopy<
+    MemSpace, HostSpace, ExecutionSpace,
+    std::enable_if_t<
+        is_sycl_type_space<MemSpace>::value &&
+        !std::is_same<ExecutionSpace, Kokkos::Experimental::SYCL>::value>> {
+  inline DeepCopy(void* dst, const void* src, size_t n) {
+    DeepCopySYCL(dst, src, n);
+  }
+
+  inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+                  size_t n) {
+    exec.fence(fence_string());
+    DeepCopyAsyncSYCL(dst, src, n);
+  }
+
+ private:
+  static const std::string& fence_string() {
+    static const std::string string =
+        std::string("Kokkos::Impl::DeepCopy<") + MemSpace::name() +
+        "Space, HostSpace, ExecutionSpace>::DeepCopy: fence before copy";
+    return string;
+  }
+};
+
+template <class MemSpace, class ExecutionSpace>
+struct DeepCopy<
+    HostSpace, MemSpace, ExecutionSpace,
+    std::enable_if_t<
+        is_sycl_type_space<MemSpace>::value &&
+        !std::is_same<ExecutionSpace, Kokkos::Experimental::SYCL>::value>> {
+  inline DeepCopy(void* dst, const void* src, size_t n) {
+    DeepCopySYCL(dst, src, n);
+  }
+
+  inline DeepCopy(const ExecutionSpace& exec, void* dst, const void* src,
+                  size_t n) {
+    exec.fence(fence_string());
+    DeepCopyAsyncSYCL(dst, src, n);
+  }
+
+ private:
+  static const std::string& fence_string() {
+    static const std::string string =
+        std::string("Kokkos::Impl::DeepCopy<HostSpace, ") + MemSpace::name() +
+        "Space, ExecutionSpace>::DeepCopy: fence before copy";
+    return string;
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+#endif
+#endif
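A minimal sketch of how the specializations above are selected (view names hypothetical): the two-pointer constructor backs the blocking Kokkos::deep_copy, while the overload taking an execution-space instance dispatches to the asynchronous DeepCopyAsyncSYCL path.

  const int n = 1024;
  Kokkos::View<double*, Kokkos::Experimental::SYCLDeviceUSMSpace> dev("dev", n);
  auto host = Kokkos::create_mirror_view(dev);
  Kokkos::deep_copy(host, dev);        // synchronous copy
  Kokkos::Experimental::SYCL exec;
  Kokkos::deep_copy(exec, dev, host);  // asynchronous on exec's queue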
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Half_Conversion.hpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Half_Conversion.hpp
new file mode 100644 (file)
index 0000000..3adbb16
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.5
+//       Copyright (2022) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SYCL_HALF_HPP_
+#define KOKKOS_SYCL_HALF_HPP_
+
+#ifdef KOKKOS_IMPL_SYCL_HALF_TYPE_DEFINED
+
+#include <Kokkos_Half.hpp>
+#include <Kokkos_NumericTraits.hpp>  // reduction_identity
+
+namespace Kokkos {
+namespace Experimental {
+
+/************************** half conversions **********************************/
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(half_t val) { return val; }
+
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(float val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(double val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(short val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned short val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(int val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned int val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long long val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long long val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(long val) { return half_t::impl_type(val); }
+KOKKOS_INLINE_FUNCTION
+half_t cast_to_half(unsigned long val) { return half_t::impl_type(val); }
+
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, float>::value, T>
+cast_from_half(half_t val) {
+  return half_t::impl_type(val);
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, double>::value, T>
+cast_from_half(half_t val) {
+  return half_t::impl_type(val);
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, short>::value, T>
+cast_from_half(half_t val) {
+  return half_t::impl_type(val);
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned short>::value, T>
+    cast_from_half(half_t val) {
+  return half_t::impl_type(val);
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, int>::value, T>
+cast_from_half(half_t val) {
+  return half_t::impl_type(val);
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, unsigned int>::value, T>
+cast_from_half(half_t val) {
+  return half_t::impl_type(val);
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long long>::value, T>
+cast_from_half(half_t val) {
+  return half_t::impl_type(val);
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long long>::value, T>
+    cast_from_half(half_t val) {
+  return half_t::impl_type(val);
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<std::is_same<T, long>::value, T>
+cast_from_half(half_t val) {
+  return half_t::impl_type(val);
+}
+template <class T>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<std::is_same<T, unsigned long>::value, T>
+    cast_from_half(half_t val) {
+  return half_t::impl_type(val);
+}
+}  // namespace Experimental
+
+template <>
+struct reduction_identity<Kokkos::Experimental::half_t> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::Experimental::half_t
+  sum() noexcept {
+    return Kokkos::Experimental::half_t::impl_type(0.0F);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::Experimental::half_t
+  prod() noexcept {
+    return Kokkos::Experimental::half_t::impl_type(1.0F);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::Experimental::half_t
+  max() noexcept {
+    return std::numeric_limits<
+        Kokkos::Experimental::half_t::impl_type>::lowest();
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static Kokkos::Experimental::half_t
+  min() noexcept {
+    return std::numeric_limits<Kokkos::Experimental::half_t::impl_type>::max();
+  }
+};
+
+}  // namespace Kokkos
+#endif  // KOKKOS_IMPL_SYCL_HALF_TYPE_DEFINED
+#endif
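A minimal sketch of the conversion helpers and the reduction identity defined above (values hypothetical; assumes the SYCL half type is enabled):

  using Kokkos::Experimental::half_t;

  half_t h = Kokkos::Experimental::cast_to_half(3.5f);        // float -> half
  float  f = Kokkos::Experimental::cast_from_half<float>(h);  // half -> float

  // The reduction_identity<half_t> specialization is what lets half_t serve
  // as the value type of, e.g., a Kokkos::parallel_reduce sum.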
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Half_Impl_Type.hpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Half_Impl_Type.hpp
new file mode 100644 (file)
index 0000000..cf7d513
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.5
+//       Copyright (2022) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SYCL_HALF_IMPL_TYPE_HPP_
+#define KOKKOS_SYCL_HALF_IMPL_TYPE_HPP_
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_SYCL
+
+#include <CL/sycl.hpp>
+
+#ifndef KOKKOS_IMPL_HALF_TYPE_DEFINED
+// Make sure no one else tries to define half_t
+#define KOKKOS_IMPL_HALF_TYPE_DEFINED
+#define KOKKOS_IMPL_SYCL_HALF_TYPE_DEFINED
+
+namespace Kokkos {
+namespace Impl {
+struct half_impl_t {
+  using type = sycl::half;
+};
+}  // namespace Impl
+}  // namespace Kokkos
+#endif  // KOKKOS_IMPL_HALF_TYPE_DEFINED
+#endif  // KOKKOS_ENABLE_SYCL
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Instance.cpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Instance.cpp
new file mode 100644 (file)
index 0000000..3772124
--- /dev/null
@@ -0,0 +1,365 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>  //kokkos_malloc
+
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+std::vector<std::optional<sycl::queue>*> SYCLInternal::all_queues;
+std::mutex SYCLInternal::mutex;
+
+Kokkos::View<uint32_t*, SYCLDeviceUSMSpace> sycl_global_unique_token_locks(
+    bool deallocate) {
+  static Kokkos::View<uint32_t*, SYCLDeviceUSMSpace> locks =
+      Kokkos::View<uint32_t*, SYCLDeviceUSMSpace>();
+  if (!deallocate && locks.extent(0) == 0)
+    locks = Kokkos::View<uint32_t*, SYCLDeviceUSMSpace>(
+        "Kokkos::UniqueToken<SYCL>::m_locks", SYCL().concurrency());
+  if (deallocate) locks = Kokkos::View<uint32_t*, SYCLDeviceUSMSpace>();
+  return locks;
+}
+
+SYCLInternal::~SYCLInternal() {
+  if (!was_finalized || m_scratchSpace || m_scratchFlags) {
+    std::cerr << "Kokkos::Experimental::SYCL ERROR: Failed to call "
+                 "Kokkos::Experimental::SYCL::finalize()"
+              << std::endl;
+    std::cerr.flush();
+  }
+}
+
+int SYCLInternal::verify_is_initialized(const char* const label) const {
+  if (!is_initialized()) {
+    Kokkos::abort((std::string("Kokkos::Experimental::SYCL::") + label +
+                   " : ERROR device not initialized\n")
+                      .c_str());
+  }
+  return is_initialized();
+}
+SYCLInternal& SYCLInternal::singleton() {
+  static SYCLInternal self;
+  return self;
+}
+
+void SYCLInternal::initialize(const sycl::device& d) {
+  auto exception_handler = [](sycl::exception_list exceptions) {
+    bool asynchronous_error = false;
+    for (std::exception_ptr const& e : exceptions) {
+      try {
+        std::rethrow_exception(e);
+      } catch (sycl::exception const& e) {
+        std::cerr << e.what() << '\n';
+        asynchronous_error = true;
+      }
+    }
+    if (asynchronous_error)
+      Kokkos::Impl::throw_runtime_exception(
+          "There was an asynchronous SYCL error!\n");
+  };
+  initialize(sycl::queue{d, exception_handler});
+}
+
+// FIXME_SYCL
+void SYCLInternal::initialize(const sycl::queue& q) {
+  if (was_finalized)
+    Kokkos::abort("Calling SYCL::initialize after SYCL::finalize is illegal\n");
+
+  if (is_initialized()) return;
+
+  if (!HostSpace::execution_space::impl_is_initialized()) {
+    const std::string msg(
+        "SYCL::initialize ERROR : HostSpace::execution_space is not "
+        "initialized");
+    Kokkos::Impl::throw_runtime_exception(msg);
+  }
+
+  const bool ok_init = nullptr == m_scratchSpace || nullptr == m_scratchFlags;
+  const bool ok_dev  = true;
+  if (ok_init && ok_dev) {
+    m_queue = q;
+    // guard pushing to all_queues
+    {
+      std::scoped_lock lock(mutex);
+      all_queues.push_back(&m_queue);
+    }
+    const sycl::device& d = m_queue->get_device();
+
+    m_maxWorkgroupSize =
+        d.template get_info<sycl::info::device::max_work_group_size>();
+    // FIXME_SYCL this should give the correct value for NVIDIA GPUs
+    m_maxConcurrency =
+        m_maxWorkgroupSize * 2 *
+        d.template get_info<sycl::info::device::max_compute_units>();
+
+    // Set up the concurrent bitset used for obtaining unique tokens from
+    // within an executing kernel.
+    {
+      const int32_t buffer_bound =
+          Kokkos::Impl::concurrent_bitset::buffer_bound(m_maxConcurrency);
+      using Record = Kokkos::Impl::SharedAllocationRecord<
+          Kokkos::Experimental::SYCLDeviceUSMSpace, void>;
+      Record* const r =
+          Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(*m_queue),
+                           "Kokkos::Experimental::SYCL::InternalScratchBitset",
+                           sizeof(uint32_t) * buffer_bound);
+      Record::increment(r);
+    }
+
+    m_maxShmemPerBlock =
+        d.template get_info<sycl::info::device::local_mem_size>();
+
+    for (auto& usm_mem : m_indirectKernelMem) {
+      usm_mem.reset(*m_queue, m_instance_id);
+    }
+
+  } else {
+    std::ostringstream msg;
+    msg << "Kokkos::Experimental::SYCL::initialize(...) FAILED";
+
+    if (!ok_init) {
+      msg << " : Already initialized";
+    }
+    Kokkos::Impl::throw_runtime_exception(msg.str());
+  }
+
+  m_team_scratch_current_size = 0;
+  m_team_scratch_ptr          = nullptr;
+}
+
+sycl::device_ptr<void> SYCLInternal::resize_team_scratch_space(
+    std::int64_t bytes, bool force_shrink) {
+  if (m_team_scratch_current_size == 0) {
+    m_team_scratch_current_size = bytes;
+    m_team_scratch_ptr =
+        Kokkos::kokkos_malloc<Experimental::SYCLDeviceUSMSpace>(
+            "Kokkos::Experimental::SYCLDeviceUSMSpace::TeamScratchMemory",
+            m_team_scratch_current_size);
+  }
+  if ((bytes > m_team_scratch_current_size) ||
+      ((bytes < m_team_scratch_current_size) && (force_shrink))) {
+    m_team_scratch_current_size = bytes;
+    m_team_scratch_ptr =
+        Kokkos::kokkos_realloc<Experimental::SYCLDeviceUSMSpace>(
+            m_team_scratch_ptr, m_team_scratch_current_size);
+  }
+  return m_team_scratch_ptr;
+}
+
+uint32_t SYCLInternal::impl_get_instance_id() const { return m_instance_id; }
+
+void SYCLInternal::finalize() {
+  SYCLInternal::fence(*m_queue,
+                      "Kokkos::SYCLInternal::finalize: fence on finalization",
+                      m_instance_id);
+  was_finalized = true;
+
+  // The global_unique_token_locks array is static and should only be
+  // deallocated once, by the default instance.
+  if (this == &singleton()) Impl::sycl_global_unique_token_locks(true);
+
+  using RecordSYCL = Kokkos::Impl::SharedAllocationRecord<SYCLDeviceUSMSpace>;
+  if (nullptr != m_scratchSpace)
+    RecordSYCL::decrement(RecordSYCL::get_record(m_scratchSpace));
+  if (nullptr != m_scratchFlags)
+    RecordSYCL::decrement(RecordSYCL::get_record(m_scratchFlags));
+  m_syclDev           = -1;
+  m_scratchSpaceCount = 0;
+  m_scratchSpace      = nullptr;
+  m_scratchFlagsCount = 0;
+  m_scratchFlags      = nullptr;
+
+  if (m_team_scratch_current_size > 0)
+    Kokkos::kokkos_free<Kokkos::Experimental::SYCLDeviceUSMSpace>(
+        m_team_scratch_ptr);
+  m_team_scratch_current_size = 0;
+  m_team_scratch_ptr          = nullptr;
+
+  for (auto& usm_mem : m_indirectKernelMem) usm_mem.reset();
+  // guard erasing from all_queues
+  {
+    std::scoped_lock lock(mutex);
+    all_queues.erase(std::find(all_queues.begin(), all_queues.end(), &m_queue));
+  }
+  m_queue.reset();
+}
+
+sycl::device_ptr<void> SYCLInternal::scratch_space(const std::size_t size) {
+  const size_type sizeScratchGrain =
+      sizeof(Kokkos::Experimental::SYCL::size_type);
+  if (verify_is_initialized("scratch_space") &&
+      m_scratchSpaceCount * sizeScratchGrain < size) {
+    m_scratchSpaceCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
+
+    using Record = Kokkos::Impl::SharedAllocationRecord<
+        Kokkos::Experimental::SYCLDeviceUSMSpace, void>;
+
+    if (nullptr != m_scratchSpace)
+      Record::decrement(Record::get_record(m_scratchSpace));
+
+    Record* const r =
+        Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(*m_queue),
+                         "Kokkos::Experimental::SYCL::InternalScratchSpace",
+                         (sizeScratchGrain * m_scratchSpaceCount));
+
+    Record::increment(r);
+
+    m_scratchSpace = reinterpret_cast<size_type*>(r->data());
+  }
+
+  return m_scratchSpace;
+}
+
+sycl::device_ptr<void> SYCLInternal::scratch_flags(const std::size_t size) {
+  const size_type sizeScratchGrain =
+      sizeof(Kokkos::Experimental::SYCL::size_type);
+  if (verify_is_initialized("scratch_flags") &&
+      m_scratchFlagsCount * sizeScratchGrain < size) {
+    m_scratchFlagsCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
+
+    using Record = Kokkos::Impl::SharedAllocationRecord<
+        Kokkos::Experimental::SYCLDeviceUSMSpace, void>;
+
+    if (nullptr != m_scratchFlags)
+      Record::decrement(Record::get_record(m_scratchFlags));
+
+    Record* const r =
+        Record::allocate(Kokkos::Experimental::SYCLDeviceUSMSpace(*m_queue),
+                         "Kokkos::Experimental::SYCL::InternalScratchFlags",
+                         (sizeScratchGrain * m_scratchFlagsCount));
+
+    Record::increment(r);
+
+    m_scratchFlags = reinterpret_cast<size_type*>(r->data());
+  }
+  m_queue->memset(m_scratchFlags, 0, m_scratchFlagsCount * sizeScratchGrain);
+  fence(*m_queue,
+        "Kokkos::Experimental::SYCLInternal::scratch_flags fence after "
+        "initializing m_scratchFlags",
+        m_instance_id);
+
+  return m_scratchFlags;
+}
+
+template <typename WAT>
+void SYCLInternal::fence_helper(WAT& wat, const std::string& name,
+                                uint32_t instance_id) {
+  Kokkos::Tools::Experimental::Impl::profile_fence_event<
+      Kokkos::Experimental::SYCL>(
+      name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{instance_id},
+      [&]() {
+        try {
+          wat.wait_and_throw();
+        } catch (sycl::exception const& e) {
+          Kokkos::Impl::throw_runtime_exception(
+              std::string("There was a synchronous SYCL error:\n") += e.what());
+        }
+      });
+}
+template void SYCLInternal::fence_helper<sycl::queue>(sycl::queue&,
+                                                      const std::string&,
+                                                      uint32_t);
+template void SYCLInternal::fence_helper<sycl::event>(sycl::event&,
+                                                      const std::string&,
+                                                      uint32_t);
+
+// This function cycles through a pool of USM allocations for functors
+SYCLInternal::IndirectKernelMem& SYCLInternal::get_indirect_kernel_mem() {
+  // Thread safety: atomically increment round robin variable
+  // NB: atomic_fetch_inc_mod returns values in the range [0, N], not
+  // [0, N) as might be expected.
+  size_t next_pool = desul::atomic_fetch_inc_mod(
+      &m_pool_next, m_usm_pool_size - 1, desul::MemoryOrderRelaxed(),
+      desul::MemoryScopeDevice());
+  return m_indirectKernelMem[next_pool];
+}
+
+template <sycl::usm::alloc Kind>
+size_t SYCLInternal::USMObjectMem<Kind>::reserve(size_t n) {
+  assert(m_q);
+
+  if (m_capacity < n) {
+    using Record = Kokkos::Impl::SharedAllocationRecord<AllocationSpace, void>;
+    // First free what we have (in case malloc can reuse it)
+    if (m_data) Record::decrement(Record::get_record(m_data));
+
+    Record* const r = Record::allocate(
+        AllocationSpace(*m_q), "Kokkos::Experimental::SYCL::USMObjectMem", n);
+    Record::increment(r);
+
+    m_data = r->data();
+    if constexpr (sycl::usm::alloc::device == Kind)
+      m_staging.reset(new char[n]);
+    m_capacity = n;
+  }
+
+  return m_capacity;
+}
+
+template <sycl::usm::alloc Kind>
+void SYCLInternal::USMObjectMem<Kind>::reset() {
+  if (m_data) {
+    // This implies a fence since this class is not copyable
+    // and deallocating implies a fence across all registered queues.
+    using Record = Kokkos::Impl::SharedAllocationRecord<AllocationSpace, void>;
+    Record::decrement(Record::get_record(m_data));
+
+    m_capacity = 0;
+    m_data     = nullptr;
+  }
+  m_q.reset();
+}
+
+template class SYCLInternal::USMObjectMem<sycl::usm::alloc::shared>;
+template class SYCLInternal::USMObjectMem<sycl::usm::alloc::device>;
+template class SYCLInternal::USMObjectMem<sycl::usm::alloc::host>;
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
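A small worked example of the grain rounding in scratch_space() and scratch_flags() above (numbers hypothetical): with sizeScratchGrain = sizeof(size_type) = 4 bytes and a request of 10 bytes,

  const std::size_t grain = 4;                         // sizeof(size_type)
  const std::size_t count = (10 + grain - 1) / grain;  // ceil(10/4) == 3
  const std::size_t bytes = count * grain;             // == 12 bytes

so every allocation is rounded up to a whole number of size_type grains.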
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Instance.hpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Instance.hpp
new file mode 100644 (file)
index 0000000..45a7887
--- /dev/null
@@ -0,0 +1,355 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SYCL_INSTANCE_HPP_
+#define KOKKOS_SYCL_INSTANCE_HPP_
+
+#include <optional>
+#include <CL/sycl.hpp>
+
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+
+class SYCLInternal {
+ public:
+  using size_type = int;
+
+  SYCLInternal() = default;
+  ~SYCLInternal();
+
+  SYCLInternal(const SYCLInternal&) = delete;
+  SYCLInternal& operator=(const SYCLInternal&) = delete;
+  SYCLInternal& operator=(SYCLInternal&&) = delete;
+  SYCLInternal(SYCLInternal&&)            = delete;
+
+  sycl::device_ptr<void> scratch_space(const std::size_t size);
+  sycl::device_ptr<void> scratch_flags(const std::size_t size);
+  sycl::device_ptr<void> resize_team_scratch_space(std::int64_t bytes,
+                                                   bool force_shrink = false);
+
+  uint32_t impl_get_instance_id() const;
+  int m_syclDev = 0;
+
+  size_t m_maxWorkgroupSize   = 0;
+  uint32_t m_maxConcurrency   = 0;
+  uint64_t m_maxShmemPerBlock = 0;
+
+  std::size_t m_scratchSpaceCount            = 0;
+  sycl::device_ptr<size_type> m_scratchSpace = nullptr;
+  std::size_t m_scratchFlagsCount            = 0;
+  sycl::device_ptr<size_type> m_scratchFlags = nullptr;
+  // mutex to access shared memory
+  mutable std::mutex m_mutexScratchSpace;
+
+  int64_t m_team_scratch_current_size       = 0;
+  sycl::device_ptr<void> m_team_scratch_ptr = nullptr;
+  mutable std::mutex m_team_scratch_mutex;
+
+  uint32_t m_instance_id = Kokkos::Tools::Experimental::Impl::idForInstance<
+      Kokkos::Experimental::SYCL>(reinterpret_cast<uintptr_t>(this));
+  std::optional<sycl::queue> m_queue;
+
+  // Using std::vector<std::optional<sycl::queue>> reveals a compiler bug when
+  // compiling for the CUDA backend. Storing pointers instead works around this.
+  static std::vector<std::optional<sycl::queue>*> all_queues;
+  // We need a mutex for thread safety when modifying all_queues.
+  static std::mutex mutex;
+
+  // USMObjectMem is a reusable buffer for a single object
+  // in USM memory
+  template <sycl::usm::alloc Kind>
+  class USMObjectMem {
+   public:
+    void reset();
+
+    void reset(sycl::queue q, uint32_t instance_id) {
+      m_instance_id = instance_id;
+      reset();
+      m_q.emplace(std::move(q));
+    }
+    USMObjectMem() = default;
+    explicit USMObjectMem(sycl::queue q, uint32_t instance_id) noexcept
+        : m_q(std::move(q)), m_instance_id(instance_id) {}
+
+    USMObjectMem(USMObjectMem const&) = delete;
+    USMObjectMem(USMObjectMem&&)      = delete;
+    USMObjectMem& operator=(USMObjectMem&&) = delete;
+    USMObjectMem& operator=(USMObjectMem const&) = delete;
+
+    ~USMObjectMem() { reset(); };
+
+    void* data() noexcept { return m_data; }
+    const void* data() const noexcept { return m_data; }
+
+    size_t capacity() const noexcept { return m_capacity; }
+
+    // reserve() allocates space for at least n bytes and
+    // returns the new capacity.
+    size_t reserve(size_t n);
+
+   private:
+    using AllocationSpace = std::conditional_t<
+        Kind == sycl::usm::alloc::device,
+        Kokkos::Experimental::SYCLDeviceUSMSpace,
+        std::conditional_t<Kind == sycl::usm::alloc::shared,
+                           Kokkos::Experimental::SYCLSharedUSMSpace,
+                           Kokkos::Experimental::SYCLHostUSMSpace>>;
+
+   public:
+    // Performs either sycl::memcpy (for USM device memory) or std::memcpy
+    // (otherwise) and returns a reference to the copied object.
+    template <typename T>
+    T& copy_from(const T& t) {
+      m_mutex.lock();
+      fence();
+      reserve(sizeof(T));
+      if constexpr (sycl::usm::alloc::device == Kind) {
+        std::memcpy(static_cast<void*>(m_staging.get()), std::addressof(t),
+                    sizeof(T));
+        m_copy_event = m_q->memcpy(m_data, m_staging.get(), sizeof(T));
+      } else
+        std::memcpy(m_data, std::addressof(t), sizeof(T));
+      return *reinterpret_cast<T*>(m_data);
+    }
+
+    void fence() {
+      SYCLInternal::fence(
+          m_last_event,
+          "Kokkos::Experimental::SYCLInternal::USMObject fence to wait for "
+          "last event to finish",
+          m_instance_id);
+    }
+
+    void register_event(sycl::event event) {
+      assert(m_last_event
+                 .get_info<sycl::info::event::command_execution_status>() ==
+             sycl::info::event_command_status::complete);
+      m_last_event = event;
+      m_mutex.unlock();
+    }
+
+    sycl::event get_copy_event() const { return m_copy_event; }
+
+   private:
+    // USMObjectMem class invariants
+    // Both expressions below must evaluate to true:
+    //
+    //   !m_data == (m_capacity == 0)
+    //   m_q || !m_data
+    //
+    // The above invariants mean that:
+    //   if m_data != nullptr then m_capacity != 0 && m_q != nullopt
+    //   if m_data == nullptr then m_capacity == 0
+
+    sycl::event m_copy_event;
+
+    std::optional<sycl::queue> m_q;
+    void* m_data = nullptr;
+    std::unique_ptr<char[]> m_staging;
+
+    size_t m_capacity = 0;
+    sycl::event m_last_event;
+
+    uint32_t m_instance_id;
+
+    // mutex to access the underlying memory
+    mutable std::mutex m_mutex;
+  };
+
+  // An indirect kernel is one where the functor to be executed is explicitly
+  // copied to USM memory before being executed, to get around SYCL's
+  // requirement that kernel functors be trivially copyable.
+  using IndirectKernelMem = USMObjectMem<sycl::usm::alloc::host>;
+  IndirectKernelMem& get_indirect_kernel_mem();
+
+  bool was_finalized = false;
+
+  static SYCLInternal& singleton();
+
+  int verify_is_initialized(const char* const label) const;
+
+  void initialize(const sycl::device& d);
+
+  void initialize(const sycl::queue& q);
+
+  int is_initialized() const { return m_queue.has_value(); }
+
+  void finalize();
+
+ private:
+  // fence(...) takes any type with a .wait_and_throw() method,
+  // i.e., sycl::event or sycl::queue
+  template <typename WAT>
+  static void fence_helper(WAT& wat, const std::string& name,
+                           uint32_t instance_id);
+
+  const static size_t m_usm_pool_size = 4;
+  std::vector<IndirectKernelMem> m_indirectKernelMem{m_usm_pool_size};
+
+  size_t m_pool_next{0};
+
+ public:
+  static void fence(sycl::queue& q, const std::string& name,
+                    uint32_t instance_id) {
+    fence_helper(q, name, instance_id);
+  }
+  static void fence(sycl::event& e, const std::string& name,
+                    uint32_t instance_id) {
+    fence_helper(e, name, instance_id);
+  }
+};
+
+// FIXME_SYCL the limit is 2048 bytes for all arguments handed to a kernel;
+// assume for now that everything besides the functor needs no more than
+// 248 bytes, hence the 1800-byte threshold below.
+#if defined(SYCL_DEVICE_COPYABLE) && defined(KOKKOS_ARCH_INTEL_GPU)
+template <typename Functor, typename Storage,
+          bool ManualCopy = (sizeof(Functor) >= 1800)>
+class SYCLFunctionWrapper;
+#else
+template <typename Functor, typename Storage,
+          bool ManualCopy = (sizeof(Functor) >= 1800 ||
+                             !std::is_trivially_copyable_v<Functor>)>
+class SYCLFunctionWrapper;
+#endif
+
+#if defined(SYCL_DEVICE_COPYABLE) && defined(KOKKOS_ARCH_INTEL_GPU)
+template <typename Functor, typename Storage>
+class SYCLFunctionWrapper<Functor, Storage, false> {
+  // We need a union here so that we can avoid calling a constructor for m_f
+  // and can control all the special member functions.
+  union TrivialWrapper {
+    TrivialWrapper() {}
+
+    TrivialWrapper(const Functor& f) { std::memcpy(&m_f, &f, sizeof(m_f)); }
+
+    TrivialWrapper(const TrivialWrapper& other) {
+      std::memcpy(&m_f, &other.m_f, sizeof(m_f));
+    }
+    TrivialWrapper(TrivialWrapper&& other) {
+      std::memcpy(&m_f, &other.m_f, sizeof(m_f));
+    }
+    TrivialWrapper& operator=(const TrivialWrapper& other) {
+      std::memcpy(&m_f, &other.m_f, sizeof(m_f));
+      return *this;
+    }
+    TrivialWrapper& operator=(TrivialWrapper&& other) {
+      std::memcpy(&m_f, &other.m_f, sizeof(m_f));
+      return *this;
+    }
+    ~TrivialWrapper() {}
+
+    Functor m_f;
+  } m_functor;
+
+ public:
+  SYCLFunctionWrapper(const Functor& functor, Storage&) : m_functor(functor) {}
+
+  const Functor& get_functor() const { return m_functor.m_f; }
+
+  sycl::event get_copy_event() const { return {}; }
+
+  static void register_event(sycl::event) {}
+};
+#else
+template <typename Functor, typename Storage>
+class SYCLFunctionWrapper<Functor, Storage, false> {
+  const Functor m_functor;
+
+ public:
+  SYCLFunctionWrapper(const Functor& functor, Storage&) : m_functor(functor) {}
+
+  const Functor& get_functor() const { return m_functor; }
+
+  sycl::event get_copy_event() const { return {}; }
+
+  static void register_event(sycl::event) {}
+};
+#endif
+
+template <typename Functor, typename Storage>
+class SYCLFunctionWrapper<Functor, Storage, true> {
+  std::reference_wrapper<const Functor> m_kernelFunctor;
+  std::reference_wrapper<Storage> m_storage;
+
+ public:
+  SYCLFunctionWrapper(const Functor& functor, Storage& storage)
+      : m_kernelFunctor(storage.copy_from(functor)), m_storage(storage) {}
+
+  std::reference_wrapper<const Functor> get_functor() const {
+    return m_kernelFunctor;
+  }
+
+  sycl::event get_copy_event() const {
+    return m_storage.get().get_copy_event();
+  }
+
+  void register_event(sycl::event event) {
+    m_storage.get().register_event(event);
+  }
+};
+
+template <typename Functor, typename Storage>
+auto make_sycl_function_wrapper(const Functor& functor, Storage& storage) {
+  return SYCLFunctionWrapper<Functor, Storage>(functor, storage);
+}
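+
+// Illustrative sketch of the dispatch rule above (non-Intel path), with two
+// assumed functor types: a small trivially copyable functor is captured by
+// value, while a large one is staged through USM memory (requires
+// <type_traits>):
+//
+//   struct Small { int a; void operator()(int) const; };
+//   struct Big { char payload[4096]; void operator()(int) const; };
+//   static_assert(!(sizeof(Small) >= 1800 ||
+//                   !std::is_trivially_copyable_v<Small>));  // ManualCopy=false
+//   static_assert(sizeof(Big) >= 1800 ||
+//                 !std::is_trivially_copyable_v<Big>);       // ManualCopy=true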
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#if defined(SYCL_DEVICE_COPYABLE) && defined(KOKKOS_ARCH_INTEL_GPU)
+template <typename Functor, typename Storage>
+struct sycl::is_device_copyable<
+    Kokkos::Experimental::Impl::SYCLFunctionWrapper<Functor, Storage, false>>
+    : std::true_type {};
+
+template <typename Functor, typename Storage>
+struct sycl::is_device_copyable<
+    const Kokkos::Experimental::Impl::SYCLFunctionWrapper<Functor, Storage,
+                                                          false>>
+    : std::true_type {};
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_MDRangePolicy.hpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_MDRangePolicy.hpp
new file mode 100644 (file)
index 0000000..3e90ec1
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef KOKKOS_SYCL_MDRANGEPOLICY_HPP_
+#define KOKKOS_SYCL_MDRANGEPOLICY_HPP_
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+
+template <>
+struct default_outer_direction<Kokkos::Experimental::SYCL> {
+  using type                     = Iterate;
+  static constexpr Iterate value = Iterate::Left;
+};
+
+template <>
+struct default_inner_direction<Kokkos::Experimental::SYCL> {
+  using type                     = Iterate;
+  static constexpr Iterate value = Iterate::Left;
+};
+
+namespace Impl {
+
+// Settings for MDRangePolicy
+template <>
+inline TileSizeProperties get_tile_size_properties<Kokkos::Experimental::SYCL>(
+    const Kokkos::Experimental::SYCL& space) {
+  TileSizeProperties properties;
+  properties.max_threads =
+      space.impl_internal_space_instance()->m_maxWorkgroupSize;
+  properties.default_largest_tile_size = 16;
+  properties.default_tile_size         = 2;
+  properties.max_total_tile_size       = properties.max_threads;
+  return properties;
+}
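+
+// Illustrative sketch (assumed user code): an MDRangePolicy on SYCL either
+// takes explicit tile sizes or derives defaults from the properties above.
+// `N` and `M` are assumed extents:
+//
+//   Kokkos::MDRangePolicy<Kokkos::Experimental::SYCL, Kokkos::Rank<2>>
+//       policy({0, 0}, {N, M}, {16, 2});  // explicit 16x2 tiles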
+
+}  // namespace Impl
+}  // namespace Kokkos
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Parallel_Range.hpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Parallel_Range.hpp
new file mode 100644 (file)
index 0000000..cf292f9
--- /dev/null
@@ -0,0 +1,297 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SYCL_PARALLEL_RANGE_HPP_
+#define KOKKOS_SYCL_PARALLEL_RANGE_HPP_
+
+#include <impl/KokkosExp_IterateTileGPU.hpp>
+
+#include <vector>
+
+namespace Kokkos::Impl {
+template <typename FunctorWrapper, typename Policy>
+struct FunctorWrapperRangePolicyParallelFor {
+  using WorkTag = typename Policy::work_tag;
+
+  void operator()(sycl::item<1> item) const {
+    const typename Policy::index_type id = item.get_linear_id() + m_begin;
+    if constexpr (std::is_void<WorkTag>::value)
+      m_functor_wrapper.get_functor()(id);
+    else
+      m_functor_wrapper.get_functor()(WorkTag(), id);
+  }
+
+  typename Policy::index_type m_begin;
+  FunctorWrapper m_functor_wrapper;
+};
+}  // namespace Kokkos::Impl
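+
+// Illustrative sketch (assumed user code) of the WorkTag dispatch above: with
+// a tagged policy the tag is passed as the first functor argument, otherwise
+// only the index is passed.
+//
+//   struct MyTag {};
+//   struct F {
+//     KOKKOS_FUNCTION void operator()(MyTag, int i) const { /* ... */ }
+//   };
+//   // With Policy = RangePolicy<Kokkos::Experimental::SYCL, MyTag> the
+//   // wrapper calls functor(MyTag(), id); with an untagged policy, functor(id).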
+
+template <class FunctorType, class... Traits>
+class Kokkos::Impl::ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
+                                Kokkos::Experimental::SYCL> {
+ public:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+ private:
+  using Member       = typename Policy::member_type;
+  using WorkTag      = typename Policy::work_tag;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  template <typename Functor>
+  static sycl::event sycl_direct_launch(const Policy& policy,
+                                        const Functor& functor,
+                                        const sycl::event& memcpy_event) {
+    // Convenience references
+    const Kokkos::Experimental::SYCL& space = policy.space();
+    sycl::queue& q                          = space.sycl_queue();
+
+    auto parallel_for_event = q.submit([&](sycl::handler& cgh) {
+      FunctorWrapperRangePolicyParallelFor<Functor, Policy> f{policy.begin(),
+                                                              functor};
+      sycl::range<1> range(policy.end() - policy.begin());
+      cgh.depends_on(memcpy_event);
+      cgh.parallel_for<FunctorWrapperRangePolicyParallelFor<Functor, Policy>>(
+          range, f);
+    });
+    q.ext_oneapi_submit_barrier(std::vector<sycl::event>{parallel_for_event});
+
+    return parallel_for_event;
+  }
+
+ public:
+  using functor_type = FunctorType;
+
+  void execute() const {
+    if (m_policy.begin() == m_policy.end()) return;
+
+    Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem&
+        indirectKernelMem = m_policy.space()
+                                .impl_internal_space_instance()
+                                ->get_indirect_kernel_mem();
+
+    auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
+        m_functor, indirectKernelMem);
+    sycl::event event = sycl_direct_launch(m_policy, functor_wrapper,
+                                           functor_wrapper.get_copy_event());
+    functor_wrapper.register_event(event);
+  }
+
+  ParallelFor(const ParallelFor&) = delete;
+  ParallelFor(ParallelFor&&)      = delete;
+  ParallelFor& operator=(const ParallelFor&) = delete;
+  ParallelFor& operator=(ParallelFor&&) = delete;
+  ~ParallelFor()                        = default;
+
+  ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+};
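+
+// Illustrative sketch (assumed user code) of a launch that reaches this
+// specialization; `x`, `y`, `a`, and `n` are assumed views and scalars:
+//
+//   Kokkos::parallel_for(
+//       "axpy", Kokkos::RangePolicy<Kokkos::Experimental::SYCL>(0, n),
+//       KOKKOS_LAMBDA(int i) { y(i) += a * x(i); });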
+
+// ParallelFor
+template <class FunctorType, class... Traits>
+class Kokkos::Impl::ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+                                Kokkos::Experimental::SYCL> {
+ public:
+  using Policy = Kokkos::MDRangePolicy<Traits...>;
+
+ private:
+  using array_index_type = typename Policy::array_index_type;
+  using index_type       = typename Policy::index_type;
+  using LaunchBounds     = typename Policy::launch_bounds;
+  using WorkTag          = typename Policy::work_tag;
+
+  const FunctorType m_functor;
+  // MDRangePolicy is not trivially copyable. Hence, replicate the data we
+  // really need in DeviceIterateTile in a trivially copyable struct.
+  const struct BarePolicy {
+    using index_type = typename Policy::index_type;
+
+    BarePolicy(const Policy& policy)
+        : m_lower(policy.m_lower),
+          m_upper(policy.m_upper),
+          m_tile(policy.m_tile),
+          m_tile_end(policy.m_tile_end),
+          m_num_tiles(policy.m_num_tiles) {}
+
+    const typename Policy::point_type m_lower;
+    const typename Policy::point_type m_upper;
+    const typename Policy::tile_type m_tile;
+    const typename Policy::point_type m_tile_end;
+    const typename Policy::index_type m_num_tiles;
+    static constexpr Iterate inner_direction = Policy::inner_direction;
+  } m_policy;
+  const Kokkos::Experimental::SYCL& m_space;
+
+  sycl::nd_range<3> compute_ranges() const {
+    const auto& m_tile     = m_policy.m_tile;
+    const auto& m_tile_end = m_policy.m_tile_end;
+
+    if constexpr (Policy::rank == 2) {
+      sycl::range<3> local_sizes(m_tile[0], m_tile[1], 1);
+      sycl::range<3> global_sizes(m_tile_end[0] * m_tile[0],
+                                  m_tile_end[1] * m_tile[1], 1);
+      return {global_sizes, local_sizes};
+    }
+    if constexpr (Policy::rank == 3) {
+      sycl::range<3> local_sizes(m_tile[0], m_tile[1], m_tile[2]);
+      sycl::range<3> global_sizes(m_tile_end[0] * m_tile[0],
+                                  m_tile_end[1] * m_tile[1],
+                                  m_tile_end[2] * m_tile[2]);
+      return {global_sizes, local_sizes};
+    }
+    if constexpr (Policy::rank == 4) {
+      // id0,id1 encoded within first index; id2 to second index; id3 to third
+      // index
+      sycl::range<3> local_sizes(m_tile[0] * m_tile[1], m_tile[2], m_tile[3]);
+      sycl::range<3> global_sizes(
+          m_tile_end[0] * m_tile[0] * m_tile_end[1] * m_tile[1],
+          m_tile_end[2] * m_tile[2], m_tile_end[3] * m_tile[3]);
+      return {global_sizes, local_sizes};
+    }
+    if constexpr (Policy::rank == 5) {
+      // id0,id1 encoded within first index; id2,id3 to second index; id4 to
+      // third index
+      sycl::range<3> local_sizes(m_tile[0] * m_tile[1], m_tile[2] * m_tile[3],
+                                 m_tile[4]);
+      sycl::range<3> global_sizes(
+          m_tile_end[0] * m_tile[0] * m_tile_end[1] * m_tile[1],
+          m_tile_end[2] * m_tile[2] * m_tile_end[3] * m_tile[3],
+          m_tile_end[4] * m_tile[4]);
+      return {global_sizes, local_sizes};
+    }
+    if constexpr (Policy::rank == 6) {
+      // id0,id1 encoded within first index; id2,id3 to second index; id4,id5 to
+      // third index
+      sycl::range<3> local_sizes(m_tile[0] * m_tile[1], m_tile[2] * m_tile[3],
+                                 m_tile[4] * m_tile[5]);
+      sycl::range<3> global_sizes(
+          m_tile_end[0] * m_tile[0] * m_tile_end[1] * m_tile[1],
+          m_tile_end[2] * m_tile[2] * m_tile_end[3] * m_tile[3],
+          m_tile_end[4] * m_tile[4] * m_tile_end[5] * m_tile[5]);
+      return {global_sizes, local_sizes};
+    }
+    static_assert(Policy::rank > 1 && Policy::rank < 7,
+                  "Kokkos::MDRange Error: Exceeded rank bounds with SYCL\n");
+  }
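+
+  // Worked example for the rank-4 encoding above (illustrative numbers):
+  // tiles {2,3,4,5} with tile counts {8,4,2,2} yield
+  //   local_sizes  = {2*3, 4, 5}         = {6, 4, 5}
+  //   global_sizes = {8*2*4*3, 2*4, 2*5} = {192, 8, 10}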
+
+  template <typename FunctorWrapper>
+  sycl::event sycl_direct_launch(const FunctorWrapper& functor_wrapper,
+                                 const sycl::event& memcpy_event) const {
+    // Convenience references
+    sycl::queue& q = m_space.sycl_queue();
+
+    if (m_policy.m_num_tiles == 0) return {};
+
+    const BarePolicy bare_policy(m_policy);
+
+    auto parallel_for_event = q.submit([&](sycl::handler& cgh) {
+      const auto range                  = compute_ranges();
+      const sycl::range<3> global_range = range.get_global_range();
+      const sycl::range<3> local_range  = range.get_local_range();
+      const sycl::nd_range sycl_swapped_range{
+          sycl::range<3>{global_range[2], global_range[1], global_range[0]},
+          sycl::range<3>{local_range[2], local_range[1], local_range[0]}};
+
+      cgh.depends_on(memcpy_event);
+      cgh.parallel_for(sycl_swapped_range, [functor_wrapper, bare_policy](
+                                               sycl::nd_item<3> item) {
+        // swap back for correct index calculations in DeviceIterateTile
+        const index_type local_x    = item.get_local_id(2);
+        const index_type local_y    = item.get_local_id(1);
+        const index_type local_z    = item.get_local_id(0);
+        const index_type global_x   = item.get_group(2);
+        const index_type global_y   = item.get_group(1);
+        const index_type global_z   = item.get_group(0);
+        const index_type n_global_x = item.get_group_range(2);
+        const index_type n_global_y = item.get_group_range(1);
+        const index_type n_global_z = item.get_group_range(0);
+
+        Kokkos::Impl::DeviceIterateTile<Policy::rank, BarePolicy, FunctorType,
+                                        typename Policy::work_tag>(
+            bare_policy, functor_wrapper.get_functor(),
+            {n_global_x, n_global_y, n_global_z},
+            {global_x, global_y, global_z}, {local_x, local_y, local_z})
+            .exec_range();
+      });
+    });
+    q.ext_oneapi_submit_barrier(std::vector<sycl::event>{parallel_for_event});
+
+    return parallel_for_event;
+  }
+
+ public:
+  using functor_type = FunctorType;
+
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy& policy, const Functor&) {
+    return policy.space().impl_internal_space_instance()->m_maxWorkgroupSize;
+  }
+
+  void execute() const {
+    Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem&
+        indirectKernelMem =
+            m_space.impl_internal_space_instance()->get_indirect_kernel_mem();
+
+    auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
+        m_functor, indirectKernelMem);
+    sycl::event event =
+        sycl_direct_launch(functor_wrapper, functor_wrapper.get_copy_event());
+    functor_wrapper.register_event(event);
+  }
+
+  ParallelFor(const ParallelFor&) = delete;
+  ParallelFor(ParallelFor&&)      = delete;
+  ParallelFor& operator=(const ParallelFor&) = delete;
+  ParallelFor& operator=(ParallelFor&&) = delete;
+  ~ParallelFor()                        = default;
+
+  ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_space(arg_policy.space()) {}
+};
+
+#endif  // KOKKOS_SYCL_PARALLEL_RANGE_HPP_
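+
+// Illustrative sketch (assumed user code) of a launch handled by the
+// MDRangePolicy specialization above; `in`, `out`, `N`, `M` are assumed:
+//
+//   Kokkos::parallel_for(
+//       "copy2d",
+//       Kokkos::MDRangePolicy<Kokkos::Experimental::SYCL, Kokkos::Rank<2>>(
+//           {0, 0}, {N, M}),
+//       KOKKOS_LAMBDA(int i, int j) { out(i, j) = in(i, j); });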
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Parallel_Reduce.hpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Parallel_Reduce.hpp
new file mode 100644 (file)
index 0000000..e980a82
--- /dev/null
@@ -0,0 +1,832 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SYCL_PARALLEL_REDUCE_HPP
+#define KOKKOS_SYCL_PARALLEL_REDUCE_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <vector>
+#if defined(KOKKOS_ENABLE_SYCL)
+#include <Kokkos_Parallel_Reduce.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+template <class ReducerType>
+inline constexpr bool use_shuffle_based_algorithm =
+    std::is_reference_v<typename ReducerType::reference_type>;
+
+namespace SYCLReduction {
+template <typename ValueType, typename ReducerType, int dim>
+std::enable_if_t<!use_shuffle_based_algorithm<ReducerType>> workgroup_reduction(
+    sycl::nd_item<dim>& item, sycl::local_ptr<ValueType> local_mem,
+    sycl::device_ptr<ValueType> results_ptr,
+    sycl::global_ptr<ValueType> device_accessible_result_ptr,
+    const unsigned int value_count, const ReducerType& final_reducer,
+    bool final, unsigned int max_size) {
+  const auto local_id = item.get_local_linear_id();
+
+  // Perform the actual workgroup reduction in each subgroup
+  // separately.
+  auto sg             = item.get_sub_group();
+  auto* result        = &local_mem[local_id * value_count];
+  const auto id_in_sg = sg.get_local_id()[0];
+  const auto local_range =
+      std::min<unsigned int>(sg.get_local_range()[0], max_size);
+  const auto upper_stride_bound =
+      std::min(local_range - id_in_sg, max_size - local_id);
+  for (unsigned int stride = 1; stride < local_range; stride <<= 1) {
+    if (stride < upper_stride_bound)
+      final_reducer.join(result, &local_mem[(local_id + stride) * value_count]);
+    sycl::group_barrier(sg);
+  }
+  sycl::group_barrier(item.get_group());
+
+  // Copy the subgroup results into the first positions of the
+  // reduction array.
+  if (id_in_sg == 0)
+    final_reducer.copy(&local_mem[sg.get_group_id()[0] * value_count], result);
+  sycl::group_barrier(item.get_group());
+
+  // Do the final reduction only using the first subgroup.
+  if (sg.get_group_id()[0] == 0) {
+    const auto n_subgroups = sg.get_group_range()[0];
+    auto* result_          = &local_mem[id_in_sg * value_count];
+    // In case the number of subgroups is larger than the range of
+    // the first subgroup, we first combine the items with a higher
+    // index.
+    for (unsigned int offset = local_range; offset < n_subgroups;
+         offset += local_range)
+      if (id_in_sg + offset < n_subgroups)
+        final_reducer.join(result_,
+                           &local_mem[(id_in_sg + offset) * value_count]);
+    sycl::group_barrier(sg);
+
+    // Then, we proceed as before.
+    for (unsigned int stride = 1; stride < local_range; stride <<= 1) {
+      if (id_in_sg + stride < n_subgroups)
+        final_reducer.join(result_,
+                           &local_mem[(id_in_sg + stride) * value_count]);
+      sycl::group_barrier(sg);
+    }
+
+    // Finally, we copy the workgroup results back to global memory
+    // to be used in the next iteration. If this is the last
+    // iteration, i.e., there is only one workgroup, we also call
+    // final() if necessary.
+    if (id_in_sg == 0) {
+      if (final) {
+        final_reducer.final(&local_mem[0]);
+        if (device_accessible_result_ptr != nullptr)
+          final_reducer.copy(&device_accessible_result_ptr[0], &local_mem[0]);
+        else
+          final_reducer.copy(&results_ptr[0], &local_mem[0]);
+      } else
+        final_reducer.copy(
+            &results_ptr[(item.get_group_linear_id()) * value_count],
+            &local_mem[0]);
+    }
+  }
+}
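+
+// Worked example of the subgroup tree reduction above (illustrative, sum
+// reduction, local_range = 8): item i joins with item i+stride, so after
+// stride 1 item 0 holds x0+x1, after stride 2 it holds x0+...+x3, and after
+// stride 4 it holds the full subgroup sum x0+...+x7.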
+
+template <typename ValueType, typename ReducerType, int dim>
+std::enable_if_t<use_shuffle_based_algorithm<ReducerType>> workgroup_reduction(
+    sycl::nd_item<dim>& item, sycl::local_ptr<ValueType> local_mem,
+    ValueType local_value, sycl::device_ptr<ValueType> results_ptr,
+    sycl::global_ptr<ValueType> device_accessible_result_ptr,
+    const ReducerType& final_reducer, bool final, unsigned int max_size) {
+  const auto local_id = item.get_local_linear_id();
+
+  // Perform the actual workgroup reduction in each subgroup
+  // separately.
+  auto sg             = item.get_sub_group();
+  const auto id_in_sg = sg.get_local_id()[0];
+  const auto local_range =
+      std::min<unsigned int>(sg.get_local_range()[0], max_size);
+  const auto upper_stride_bound =
+      std::min(local_range - id_in_sg, max_size - local_id);
+  for (unsigned int stride = 1; stride < local_range; stride <<= 1) {
+    auto tmp = sg.shuffle_down(local_value, stride);
+    if (stride < upper_stride_bound) final_reducer.join(&local_value, &tmp);
+  }
+
+  // Copy the subgroup results into the first positions of the
+  // reduction array.
+  const auto max_subgroup_size = sg.get_max_local_range()[0];
+  const auto n_active_subgroups =
+      (max_size + max_subgroup_size - 1) / max_subgroup_size;
+  if (id_in_sg == 0 && sg.get_group_id()[0] <= n_active_subgroups)
+    local_mem[sg.get_group_id()[0]] = local_value;
+  item.barrier(sycl::access::fence_space::local_space);
+
+  // Do the final reduction only using the first subgroup.
+  if (sg.get_group_id()[0] == 0) {
+    auto sg_value = local_mem[id_in_sg < n_active_subgroups ? id_in_sg : 0];
+
+    // In case the number of subgroups is larger than the range of
+    // the first subgroup, we first combine the items with a higher
+    // index.
+    if (n_active_subgroups > local_range) {
+      for (unsigned int offset = local_range; offset < n_active_subgroups;
+           offset += local_range)
+        if (id_in_sg + offset < n_active_subgroups) {
+          final_reducer.join(&sg_value, &local_mem[(id_in_sg + offset)]);
+        }
+      sg.barrier();
+    }
+
+    // Then, we proceed as before.
+    for (unsigned int stride = 1; stride < local_range; stride <<= 1) {
+      auto tmp = sg.shuffle_down(sg_value, stride);
+      if (id_in_sg + stride < n_active_subgroups)
+        final_reducer.join(&sg_value, &tmp);
+    }
+
+    // Finally, we copy the workgroup results back to global memory
+    // to be used in the next iteration. If this is the last
+    // iteration, i.e., there is only one workgroup, we also call
+    // final() if necessary.
+    if (id_in_sg == 0) {
+      if (final) {
+        final_reducer.final(&sg_value);
+        if (device_accessible_result_ptr != nullptr)
+          device_accessible_result_ptr[0] = sg_value;
+        else
+          results_ptr[0] = sg_value;
+      } else
+        results_ptr[(item.get_group_linear_id())] = sg_value;
+    }
+  }
+}
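+
+// Worked example of the shuffle-based variant above (illustrative, sum
+// reduction over subgroup values {3,1,4,1,5,9,2,6}):
+//   stride 1: item i adds item i+1's value     -> item 0 holds 3+1 = 4
+//   stride 2: item 0 adds item 2's running sum -> item 0 holds 4+5 = 9
+//   stride 4: item 0 adds item 4's running sum -> item 0 holds 9+22 = 31,
+//   the total of all eight values.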
+
+}  // namespace SYCLReduction
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
+                     Kokkos::Experimental::SYCL> {
+ public:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+ private:
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
+  using execution_space = typename Analysis::execution_space;
+  using value_type      = typename Analysis::value_type;
+  using pointer_type    = typename Analysis::pointer_type;
+  using reference_type  = typename Analysis::reference_type;
+
+  using WorkTag = typename Policy::work_tag;
+
+ public:
+  // V - View
+  template <typename V>
+  ParallelReduce(const FunctorType& f, const Policy& p, const V& v,
+                 std::enable_if_t<Kokkos::is_view<V>::value, void*> = nullptr)
+      : m_functor(f),
+        m_policy(p),
+        m_result_ptr(v.data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                              typename V::memory_space>::accessible),
+        m_shared_memory_lock(
+            p.space().impl_internal_space_instance()->m_mutexScratchSpace) {}
+
+  ParallelReduce(const FunctorType& f, const Policy& p,
+                 const ReducerType& reducer)
+      : m_functor(f),
+        m_policy(p),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_shared_memory_lock(
+            p.space().impl_internal_space_instance()->m_mutexScratchSpace) {}
+
+ private:
+  template <typename PolicyType, typename FunctorWrapper,
+            typename ReducerWrapper>
+  sycl::event sycl_direct_launch(
+      const PolicyType& policy, const FunctorWrapper& functor_wrapper,
+      const ReducerWrapper& reducer_wrapper,
+      const std::vector<sycl::event>& memcpy_events) const {
+    // Convenience references
+    const Kokkos::Experimental::SYCL& space = policy.space();
+    Kokkos::Experimental::Impl::SYCLInternal& instance =
+        *space.impl_internal_space_instance();
+    sycl::queue& q = space.sycl_queue();
+
+    // FIXME_SYCL optimize
+    constexpr size_t wgroup_size       = 128;
+    constexpr size_t values_per_thread = 2;
+    std::size_t size                   = policy.end() - policy.begin();
+    const auto init_size               = std::max<std::size_t>(
+        ((size + values_per_thread - 1) / values_per_thread + wgroup_size - 1) /
+            wgroup_size,
+        1);
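+    // e.g. (illustrative), size = 1000: ceil(1000/2) = 500 work-items are
+    // needed, giving init_size = ceil(500/128) = 4 partial results at most.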
+    const unsigned int value_count =
+        Analysis::value_count(ReducerConditional::select(m_functor, m_reducer));
+    const auto results_ptr =
+        static_cast<sycl::device_ptr<value_type>>(instance.scratch_space(
+            sizeof(value_type) * std::max(value_count, 1u) * init_size));
+    sycl::global_ptr<value_type> device_accessible_result_ptr =
+        m_result_ptr_device_accessible ? m_result_ptr : nullptr;
+    auto scratch_flags = static_cast<sycl::device_ptr<unsigned int>>(
+        instance.scratch_flags(sizeof(unsigned int)));
+
+    sycl::event last_reduction_event;
+
+    // If size <= 1, we only call init(), the functor, and possibly final()
+    // once, working with the global scratch memory, but we don't copy back
+    // to m_result_ptr yet.
+    if (size <= 1) {
+      auto parallel_reduce_event = q.submit([&](sycl::handler& cgh) {
+        const auto begin = policy.begin();
+        cgh.depends_on(memcpy_events);
+        cgh.single_task([=]() {
+          const auto& functor          = functor_wrapper.get_functor();
+          const auto& selected_reducer = ReducerConditional::select(
+              static_cast<const FunctorType&>(functor),
+              static_cast<const ReducerType&>(reducer_wrapper.get_functor()));
+          typename Analysis::Reducer final_reducer(&selected_reducer);
+          reference_type update = final_reducer.init(results_ptr);
+          if (size == 1) {
+            if constexpr (std::is_void<WorkTag>::value)
+              functor(begin, update);
+            else
+              functor(WorkTag(), begin, update);
+          }
+          final_reducer.final(results_ptr);
+          if (device_accessible_result_ptr != nullptr)
+            final_reducer.copy(device_accessible_result_ptr.get(),
+                               results_ptr.get());
+        });
+      });
+      q.ext_oneapi_submit_barrier(
+          std::vector<sycl::event>{parallel_reduce_event});
+      last_reduction_event = parallel_reduce_event;
+    }
+
+    // Otherwise, we perform a reduction on the values in all workgroups
+    // separately, write the workgroup results back to global memory, and let
+    // the last workgroup to finish combine all partial results and compute
+    // the final value.
+    if (size > 1) {
+      auto n_wgroups = ((size + values_per_thread - 1) / values_per_thread +
+                        wgroup_size - 1) /
+                       wgroup_size;
+      auto parallel_reduce_event = q.submit([&](sycl::handler& cgh) {
+        sycl::accessor<value_type, 1, sycl::access::mode::read_write,
+                       sycl::access::target::local>
+            local_mem(sycl::range<1>(wgroup_size) * std::max(value_count, 1u),
+                      cgh);
+        sycl::accessor<unsigned int, 1, sycl::access::mode::read_write,
+                       sycl::access::target::local>
+            num_teams_done(1, cgh);
+
+        const auto begin = policy.begin();
+
+        cgh.depends_on(memcpy_events);
+
+        cgh.parallel_for(
+            sycl::nd_range<1>(n_wgroups * wgroup_size, wgroup_size),
+            [=](sycl::nd_item<1> item) {
+              const auto local_id = item.get_local_linear_id();
+              const auto global_id =
+                  wgroup_size * item.get_group_linear_id() * values_per_thread +
+                  local_id;
+              const auto& functor          = functor_wrapper.get_functor();
+              const auto& selected_reducer = ReducerConditional::select(
+                  static_cast<const FunctorType&>(functor),
+                  static_cast<const ReducerType&>(
+                      reducer_wrapper.get_functor()));
+              typename Analysis::Reducer final_reducer(&selected_reducer);
+
+              using index_type       = typename Policy::index_type;
+              const auto upper_bound = std::min<index_type>(
+                  global_id + values_per_thread * wgroup_size, size);
+
+              if constexpr (Analysis::StaticValueSize == 0) {
+                reference_type update =
+                    final_reducer.init(&local_mem[local_id * value_count]);
+                for (index_type id = global_id; id < upper_bound;
+                     id += wgroup_size) {
+                  if constexpr (std::is_void<WorkTag>::value)
+                    functor(id + begin, update);
+                  else
+                    functor(WorkTag(), id + begin, update);
+                }
+                item.barrier(sycl::access::fence_space::local_space);
+
+                SYCLReduction::workgroup_reduction<>(
+                    item, local_mem.get_pointer(), results_ptr,
+                    device_accessible_result_ptr, value_count, final_reducer,
+                    false, std::min(size, wgroup_size));
+
+                if (local_id == 0) {
+                  sycl::atomic_ref<unsigned, sycl::memory_order::relaxed,
+                                   sycl::memory_scope::device,
+                                   sycl::access::address_space::global_space>
+                      scratch_flags_ref(*scratch_flags);
+                  num_teams_done[0] = ++scratch_flags_ref;
+                }
+                item.barrier(sycl::access::fence_space::local_space);
+                if (num_teams_done[0] == n_wgroups) {
+                  if (local_id >= n_wgroups)
+                    final_reducer.init(&local_mem[local_id * value_count]);
+                  else {
+                    final_reducer.copy(&local_mem[local_id * value_count],
+                                       &results_ptr[local_id * value_count]);
+                    for (unsigned int id = local_id + wgroup_size;
+                         id < n_wgroups; id += wgroup_size) {
+                      final_reducer.join(&local_mem[local_id * value_count],
+                                         &results_ptr[id * value_count]);
+                    }
+                  }
+
+                  SYCLReduction::workgroup_reduction<>(
+                      item, local_mem.get_pointer(), results_ptr,
+                      device_accessible_result_ptr, value_count, final_reducer,
+                      true, std::min(n_wgroups, wgroup_size));
+                }
+              } else {
+                value_type local_value;
+                reference_type update = final_reducer.init(&local_value);
+                for (index_type id = global_id; id < upper_bound;
+                     id += wgroup_size) {
+                  if constexpr (std::is_void<WorkTag>::value)
+                    functor(id + begin, update);
+                  else
+                    functor(WorkTag(), id + begin, update);
+                }
+
+                SYCLReduction::workgroup_reduction<>(
+                    item, local_mem.get_pointer(), local_value, results_ptr,
+                    device_accessible_result_ptr, final_reducer, false,
+                    std::min(size, wgroup_size));
+
+                if (local_id == 0) {
+                  sycl::atomic_ref<unsigned, sycl::memory_order::relaxed,
+                                   sycl::memory_scope::device,
+                                   sycl::access::address_space::global_space>
+                      scratch_flags_ref(*scratch_flags);
+                  num_teams_done[0] = ++scratch_flags_ref;
+                }
+                item.barrier(sycl::access::fence_space::local_space);
+                if (num_teams_done[0] == n_wgroups) {
+                  if (local_id >= n_wgroups)
+                    final_reducer.init(&local_value);
+                  else {
+                    local_value = results_ptr[local_id];
+                    for (unsigned int id = local_id + wgroup_size;
+                         id < n_wgroups; id += wgroup_size) {
+                      final_reducer.join(&local_value, &results_ptr[id]);
+                    }
+                  }
+
+                  SYCLReduction::workgroup_reduction<>(
+                      item, local_mem.get_pointer(), local_value, results_ptr,
+                      device_accessible_result_ptr, final_reducer, true,
+                      std::min(n_wgroups, wgroup_size));
+                }
+              }
+            });
+      });
+      last_reduction_event       = q.ext_oneapi_submit_barrier(
+          std::vector<sycl::event>{parallel_reduce_event});
+    }
+
+    // At this point, the reduced value is written to the entry in results_ptr
+    // and all that is left is to copy it back to the given result pointer if
+    // necessary.
+    if (m_result_ptr && !m_result_ptr_device_accessible) {
+      Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                             Kokkos::Experimental::SYCLDeviceUSMSpace>(
+          space, m_result_ptr, results_ptr,
+          sizeof(*m_result_ptr) * value_count);
+    }
+
+    return last_reduction_event;
+  }
+
+ public:
+  void execute() const {
+    Kokkos::Experimental::Impl::SYCLInternal& instance =
+        *m_policy.space().impl_internal_space_instance();
+    using IndirectKernelMem =
+        Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem;
+    IndirectKernelMem& indirectKernelMem  = instance.get_indirect_kernel_mem();
+    IndirectKernelMem& indirectReducerMem = instance.get_indirect_kernel_mem();
+
+    auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
+        m_functor, indirectKernelMem);
+    auto reducer_wrapper = Experimental::Impl::make_sycl_function_wrapper(
+        m_reducer, indirectReducerMem);
+
+    sycl::event event = sycl_direct_launch(
+        m_policy, functor_wrapper, reducer_wrapper,
+        {functor_wrapper.get_copy_event(), reducer_wrapper.get_copy_event()});
+    functor_wrapper.register_event(event);
+    reducer_wrapper.register_event(event);
+  }
+
+ private:
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  const bool m_result_ptr_device_accessible;
+
+  // Only let one ParallelReduce or ParallelScan instance modify the shared
+  // memory at a time. The constructor acquires the mutex, which is released
+  // in the destructor.
+  std::scoped_lock<std::mutex> m_shared_memory_lock;
+};
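+
+// Illustrative sketch (assumed user code) of a reduction that reaches this
+// specialization; `x`, `y`, `n` are assumed. Since the host scalar is not
+// device accessible, the DeepCopy at the end of sycl_direct_launch copies
+// the reduced value back:
+//
+//   double sum = 0.0;
+//   Kokkos::parallel_reduce(
+//       "dot", Kokkos::RangePolicy<Kokkos::Experimental::SYCL>(0, n),
+//       KOKKOS_LAMBDA(int i, double& update) { update += x(i) * y(i); }, sum);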
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
+                     Kokkos::Experimental::SYCL> {
+ public:
+  using Policy = Kokkos::MDRangePolicy<Traits...>;
+
+ private:
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
+  using execution_space = typename Analysis::execution_space;
+  using value_type      = typename Analysis::value_type;
+  using pointer_type    = typename Analysis::pointer_type;
+  using reference_type  = typename Analysis::reference_type;
+
+  using WorkTag = typename Policy::work_tag;
+
+  // MDRangePolicy is not trivially copyable. Hence, replicate the data we
+  // really need in DeviceIterateTile in a trivially copyable struct.
+  struct BarePolicy {
+    using index_type = typename Policy::index_type;
+
+    BarePolicy(const Policy& policy)
+        : m_lower(policy.m_lower),
+          m_upper(policy.m_upper),
+          m_tile(policy.m_tile),
+          m_tile_end(policy.m_tile_end),
+          m_num_tiles(policy.m_num_tiles),
+          m_prod_tile_dims(policy.m_prod_tile_dims) {}
+
+    const typename Policy::point_type m_lower;
+    const typename Policy::point_type m_upper;
+    const typename Policy::tile_type m_tile;
+    const typename Policy::point_type m_tile_end;
+    const typename Policy::index_type m_num_tiles;
+    const typename Policy::index_type m_prod_tile_dims;
+    static constexpr Iterate inner_direction = Policy::inner_direction;
+    static constexpr int rank                = Policy::rank;
+  };
+
+ public:
+  // V - View
+  template <typename V>
+  ParallelReduce(const FunctorType& f, const Policy& p, const V& v,
+                 std::enable_if_t<Kokkos::is_view<V>::value, void*> = nullptr)
+      : m_functor(f),
+        m_policy(p),
+        m_space(p.space()),
+        m_result_ptr(v.data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                              typename V::memory_space>::accessible),
+        m_shared_memory_lock(
+            m_space.impl_internal_space_instance()->m_mutexScratchSpace) {}
+
+  ParallelReduce(const FunctorType& f, const Policy& p,
+                 const ReducerType& reducer)
+      : m_functor(f),
+        m_policy(p),
+        m_space(p.space()),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_shared_memory_lock(
+            m_space.impl_internal_space_instance()->m_mutexScratchSpace) {}
+
+ private:
+  template <typename PolicyType, typename FunctorWrapper,
+            typename ReducerWrapper>
+  sycl::event sycl_direct_launch(
+      const PolicyType& policy, const FunctorWrapper& functor_wrapper,
+      const ReducerWrapper& reducer_wrapper,
+      const std::vector<sycl::event>& memcpy_events) const {
+    // Convenience references
+    Kokkos::Experimental::Impl::SYCLInternal& instance =
+        *m_space.impl_internal_space_instance();
+    sycl::queue& q = m_space.sycl_queue();
+
+    const typename Policy::index_type nwork = m_policy.m_num_tiles;
+    const typename Policy::index_type block_size =
+        std::pow(2, std::ceil(std::log2(m_policy.m_prod_tile_dims)));
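+    // e.g. (illustrative), 5x3 tiles: m_prod_tile_dims = 15, so
+    // block_size = 2^ceil(log2(15)) = 16 work-items per workgroup.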
+
+    const sycl::range<1> local_range(block_size);
+    // REMEMBER to swap local x<->y to conform with the CUDA/HIP implementation
+    const sycl::range<1> global_range(nwork * block_size);
+    const sycl::nd_range<1> range{global_range, local_range};
+
+    const size_t wgroup_size = range.get_local_range().size();
+    size_t size              = range.get_global_range().size();
+    const auto init_size =
+        std::max<std::size_t>((size + wgroup_size - 1) / wgroup_size, 1);
+    const unsigned int value_count =
+        Analysis::value_count(ReducerConditional::select(m_functor, m_reducer));
+    const auto results_ptr =
+        static_cast<sycl::device_ptr<value_type>>(instance.scratch_space(
+            sizeof(value_type) * std::max(value_count, 1u) * init_size));
+    sycl::global_ptr<value_type> device_accessible_result_ptr =
+        m_result_ptr_device_accessible ? m_result_ptr : nullptr;
+    auto scratch_flags = static_cast<sycl::device_ptr<unsigned int>>(
+        instance.scratch_flags(sizeof(unsigned int)));
+
+    sycl::event last_reduction_event;
+
+    // If size <= 1, we only call init(), the functor, and possibly final()
+    // once, working with the global scratch memory, but we don't copy back
+    // to m_result_ptr yet.
+    if (size <= 1) {
+      auto parallel_reduce_event = q.submit([&](sycl::handler& cgh) {
+        cgh.depends_on(memcpy_events);
+        cgh.single_task([=]() {
+          const auto& functor          = functor_wrapper.get_functor();
+          const auto& selected_reducer = ReducerConditional::select(
+              static_cast<const FunctorType&>(functor),
+              static_cast<const ReducerType&>(reducer_wrapper.get_functor()));
+          typename Analysis::Reducer final_reducer(&selected_reducer);
+
+          reference_type update = final_reducer.init(results_ptr);
+          if (size == 1) {
+            Kokkos::Impl::Reduce::DeviceIterateTile<
+                Policy::rank, BarePolicy, FunctorType,
+                typename Policy::work_tag, reference_type>(
+                policy, functor, update, {1, 1, 1}, {0, 0, 0}, {0, 0, 0})
+                .exec_range();
+          }
+          final_reducer.final(results_ptr);
+          if (device_accessible_result_ptr)
+            final_reducer.copy(device_accessible_result_ptr.get(),
+                               results_ptr.get());
+        });
+      });
+      q.ext_oneapi_submit_barrier(
+          std::vector<sycl::event>{parallel_reduce_event});
+      last_reduction_event = parallel_reduce_event;
+    }
+
+    // Otherwise, we perform a reduction on the values in all workgroups
+    // separately, write the workgroup results back to global memory, and let
+    // the last workgroup to finish combine all partial results and compute
+    // the final value.
+    if (size > 1) {
+      auto n_wgroups             = (size + wgroup_size - 1) / wgroup_size;
+      auto parallel_reduce_event = q.submit([&](sycl::handler& cgh) {
+        sycl::accessor<value_type, 1, sycl::access::mode::read_write,
+                       sycl::access::target::local>
+            local_mem(sycl::range<1>(wgroup_size) * std::max(value_count, 1u),
+                      cgh);
+        sycl::accessor<unsigned int, 1, sycl::access::mode::read_write,
+                       sycl::access::target::local>
+            num_teams_done(1, cgh);
+
+        const BarePolicy bare_policy = m_policy;
+
+        cgh.depends_on(memcpy_events);
+
+        cgh.parallel_for(range, [=](sycl::nd_item<1> item) {
+          const auto local_id          = item.get_local_linear_id();
+          const auto& functor          = functor_wrapper.get_functor();
+          const auto& selected_reducer = ReducerConditional::select(
+              static_cast<const FunctorType&>(functor),
+              static_cast<const ReducerType&>(reducer_wrapper.get_functor()));
+          typename Analysis::Reducer final_reducer(&selected_reducer);
+
+          // In the first iteration, we call the functor to initialize the
+          // local memory. Otherwise, the local memory is initialized with
+          // the results from the previous iteration that are stored in
+          // global memory.
+          using index_type = typename Policy::index_type;
+
+          // SWAPPED here to conform with the CUDA implementation
+          const index_type local_x    = 0;
+          const index_type local_y    = item.get_local_id(0);
+          const index_type local_z    = 0;
+          const index_type global_x   = item.get_group(0);
+          const index_type global_y   = 0;
+          const index_type global_z   = 0;
+          const index_type n_global_x = item.get_group_range(0);
+          const index_type n_global_y = 1;
+          const index_type n_global_z = 1;
+
+          if constexpr (Analysis::StaticValueSize == 0) {
+            reference_type update =
+                final_reducer.init(&local_mem[local_id * value_count]);
+
+            Kokkos::Impl::Reduce::DeviceIterateTile<
+                Policy::rank, BarePolicy, FunctorType,
+                typename Policy::work_tag, reference_type>(
+                bare_policy, functor, update,
+                {n_global_x, n_global_y, n_global_z},
+                {global_x, global_y, global_z}, {local_x, local_y, local_z})
+                .exec_range();
+            item.barrier(sycl::access::fence_space::local_space);
+
+            SYCLReduction::workgroup_reduction<>(
+                item, local_mem.get_pointer(), results_ptr,
+                device_accessible_result_ptr, value_count, final_reducer, false,
+                std::min(size, wgroup_size));
+
+            if (local_id == 0) {
+              sycl::atomic_ref<unsigned, sycl::memory_order::relaxed,
+                               sycl::memory_scope::device,
+                               sycl::access::address_space::global_space>
+                  scratch_flags_ref(*scratch_flags);
+              num_teams_done[0] = ++scratch_flags_ref;
+            }
+            item.barrier(sycl::access::fence_space::local_space);
+            if (num_teams_done[0] == n_wgroups) {
+              if (local_id >= n_wgroups)
+                final_reducer.init(&local_mem[local_id * value_count]);
+              else {
+                final_reducer.copy(&local_mem[local_id * value_count],
+                                   &results_ptr[local_id * value_count]);
+                for (unsigned int id = local_id + wgroup_size; id < n_wgroups;
+                     id += wgroup_size) {
+                  final_reducer.join(&local_mem[local_id * value_count],
+                                     &results_ptr[id * value_count]);
+                }
+              }
+
+              SYCLReduction::workgroup_reduction<>(
+                  item, local_mem.get_pointer(), results_ptr,
+                  device_accessible_result_ptr, value_count, final_reducer,
+                  true, std::min(n_wgroups, wgroup_size));
+            }
+          } else {
+            value_type local_value;
+            reference_type update = final_reducer.init(&local_value);
+
+            Kokkos::Impl::Reduce::DeviceIterateTile<
+                Policy::rank, BarePolicy, FunctorType,
+                typename Policy::work_tag, reference_type>(
+                bare_policy, functor, update,
+                {n_global_x, n_global_y, n_global_z},
+                {global_x, global_y, global_z}, {local_x, local_y, local_z})
+                .exec_range();
+
+            SYCLReduction::workgroup_reduction<>(
+                item, local_mem.get_pointer(), local_value, results_ptr,
+                device_accessible_result_ptr, final_reducer, false,
+                std::min(size, wgroup_size));
+
+            if (local_id == 0) {
+              sycl::atomic_ref<unsigned, sycl::memory_order::relaxed,
+                               sycl::memory_scope::device,
+                               sycl::access::address_space::global_space>
+                  scratch_flags_ref(*scratch_flags);
+              num_teams_done[0] = ++scratch_flags_ref;
+            }
+            item.barrier(sycl::access::fence_space::local_space);
+            if (num_teams_done[0] == n_wgroups) {
+              if (local_id >= n_wgroups)
+                final_reducer.init(&local_value);
+              else {
+                local_value = results_ptr[local_id];
+                for (unsigned int id = local_id + wgroup_size; id < n_wgroups;
+                     id += wgroup_size) {
+                  final_reducer.join(&local_value, &results_ptr[id]);
+                }
+              }
+
+              SYCLReduction::workgroup_reduction<>(
+                  item, local_mem.get_pointer(), local_value, results_ptr,
+                  device_accessible_result_ptr, final_reducer, true,
+                  std::min(n_wgroups, wgroup_size));
+            }
+          }
+        });
+      });
+      last_reduction_event       = q.ext_oneapi_submit_barrier(
+          std::vector<sycl::event>{parallel_reduce_event});
+    }
+
+    // At this point, the reduced value is written to the entry in results_ptr
+    // and all that is left is to copy it back to the given result pointer if
+    // necessary.
+    if (m_result_ptr && !m_result_ptr_device_accessible) {
+      Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                             Kokkos::Experimental::SYCLDeviceUSMSpace>(
+          m_space, m_result_ptr, results_ptr,
+          sizeof(*m_result_ptr) * value_count);
+    }
+
+    return last_reduction_event;
+  }
+
+ public:
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy& policy, const Functor&) {
+    return policy.space().impl_internal_space_instance()->m_maxWorkgroupSize;
+  }
+
+  void execute() const {
+    Kokkos::Experimental::Impl::SYCLInternal& instance =
+        *m_space.impl_internal_space_instance();
+    using IndirectKernelMem =
+        Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem;
+    IndirectKernelMem& indirectKernelMem  = instance.get_indirect_kernel_mem();
+    IndirectKernelMem& indirectReducerMem = instance.get_indirect_kernel_mem();
+
+    auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
+        m_functor, indirectKernelMem);
+    auto reducer_wrapper = Experimental::Impl::make_sycl_function_wrapper(
+        m_reducer, indirectReducerMem);
+
+    sycl::event event = sycl_direct_launch(
+        m_policy, functor_wrapper, reducer_wrapper,
+        {functor_wrapper.get_copy_event(), reducer_wrapper.get_copy_event()});
+    functor_wrapper.register_event(event);
+    reducer_wrapper.register_event(event);
+  }
+
+ private:
+  const FunctorType m_functor;
+  const BarePolicy m_policy;
+  const Kokkos::Experimental::SYCL& m_space;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  const bool m_result_ptr_device_accessible;
+
+  // Only let one ParallelReduce or ParallelScan instance modify the shared
+  // memory at a time. The constructor acquires the mutex, which is released
+  // in the destructor.
+  std::scoped_lock<std::mutex> m_shared_memory_lock;
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif
+#endif /* KOKKOS_SYCL_PARALLEL_REDUCE_HPP */
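+
+// Illustrative sketch (assumed user code) of an MDRange reduction handled by
+// the second specialization above; `A`, `N`, `M` are assumed:
+//
+//   double sum = 0.0;
+//   Kokkos::parallel_reduce(
+//       "sum2d",
+//       Kokkos::MDRangePolicy<Kokkos::Experimental::SYCL, Kokkos::Rank<2>>(
+//           {0, 0}, {N, M}),
+//       KOKKOS_LAMBDA(int i, int j, double& update) { update += A(i, j); },
+//       sum);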
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Parallel_Scan.hpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Parallel_Scan.hpp
new file mode 100644 (file)
index 0000000..e2afc97
--- /dev/null
@@ -0,0 +1,377 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SYCL_PARALLEL_SCAN_HPP
+#define KOKKOS_SYCL_PARALLEL_SCAN_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <memory>
+#include <vector>
+#if defined(KOKKOS_ENABLE_SYCL)
+
+namespace Kokkos {
+namespace Impl {
+
+// Perform a scan over a workgroup.
+// At the end of this function, the subgroup scans are stored in the local array
+// such that the last value (at position n_active_subgroups-1) contains the
+// total sum.
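+//
+// For example, with a sum reducer and a subgroup of size 8 whose lanes all
+// hold the value 1, the shuffle_up loop below yields the inclusive prefix
+// sums {1,2,3,4,5,6,7,8}: at stride s (1, 2, 4, ...), lane i joins in the
+// value previously held by lane i - s, and strides of at least the subgroup
+// size have no effect within the subgroup.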
+template <int dim, typename ValueType, typename FunctorType>
+void workgroup_scan(sycl::nd_item<dim> item, const FunctorType& final_reducer,
+                    sycl::local_ptr<ValueType> local_mem,
+                    ValueType& local_value, unsigned int global_range) {
+  // subgroup scans
+  auto sg                = item.get_sub_group();
+  const auto sg_group_id = sg.get_group_id()[0];
+  const auto id_in_sg    = sg.get_local_id()[0];
+  for (unsigned int stride = 1; stride < global_range; stride <<= 1) {
+    auto tmp = sg.shuffle_up(local_value, stride);
+    if (id_in_sg >= stride) final_reducer.join(&local_value, &tmp);
+  }
+
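+  // Each lane now holds the inclusive prefix over its subgroup; the last lane
+  // thus holds the subgroup total, which is stored to local_mem below. The
+  // shuffle_up by one combined with re-initializing lane 0 then converts the
+  // per-lane values into an exclusive scan.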
+  const auto max_subgroup_size = sg.get_max_local_range()[0];
+  const auto n_active_subgroups =
+      (global_range + max_subgroup_size - 1) / max_subgroup_size;
+
+  const auto local_range = sg.get_local_range()[0];
+  if (id_in_sg == local_range - 1 && sg_group_id < n_active_subgroups)
+    local_mem[sg_group_id] = local_value;
+  local_value = sg.shuffle_up(local_value, 1);
+  if (id_in_sg == 0) final_reducer.init(&local_value);
+  sycl::group_barrier(item.get_group());
+
+  // scan subgroup results using the first subgroup
+  if (n_active_subgroups > 1) {
+    if (sg_group_id == 0) {
+      const auto n_rounds =
+          (n_active_subgroups + local_range - 1) / local_range;
+      for (unsigned int round = 0; round < n_rounds; ++round) {
+        const unsigned int idx = id_in_sg + round * local_range;
+        const auto upper_bound =
+            std::min(local_range, n_active_subgroups - round * local_range);
+        auto local_sg_value = local_mem[idx < n_active_subgroups ? idx : 0];
+        for (unsigned int stride = 1; stride < upper_bound; stride <<= 1) {
+          auto tmp = sg.shuffle_up(local_sg_value, stride);
+          if (id_in_sg >= stride) {
+            if (idx < n_active_subgroups)
+              final_reducer.join(&local_sg_value, &tmp);
+            else
+              local_sg_value = tmp;
+          }
+        }
+        if (idx < n_active_subgroups) {
+          local_mem[idx] = local_sg_value;
+          if (round > 0)
+            final_reducer.join(&local_mem[idx],
+                               &local_mem[round * local_range - 1]);
+        }
+        if (round + 1 < n_rounds) sycl::group_barrier(sg);
+      }
+    }
+    sycl::group_barrier(item.get_group());
+  }
+
+  // add results to all subgroups
+  if (sg_group_id > 0)
+    final_reducer.join(&local_value, &local_mem[sg_group_id - 1]);
+}
+
+template <class FunctorType, class... Traits>
+class ParallelScanSYCLBase {
+ public:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+ protected:
+  using Member       = typename Policy::member_type;
+  using WorkTag      = typename Policy::work_tag;
+  using WorkRange    = typename Policy::WorkRange;
+  using LaunchBounds = typename Policy::launch_bounds;
+
+ public:
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
+  using pointer_type   = typename Analysis::pointer_type;
+  using value_type     = typename Analysis::value_type;
+  using reference_type = typename Analysis::reference_type;
+  using functor_type   = FunctorType;
+  using size_type      = Kokkos::Experimental::SYCL::size_type;
+  using index_type     = typename Policy::index_type;
+
+ protected:
+  const FunctorType m_functor;
+  const Policy m_policy;
+  pointer_type m_scratch_space = nullptr;
+
+  // Only let one ParallelReduce/ParallelScan modify the shared memory at a
+  // time. The constructor acquires the mutex, which is released in the
+  // destructor.
+  std::scoped_lock<std::mutex> m_shared_memory_lock;
+
+ private:
+  template <typename FunctorWrapper>
+  void scan_internal(sycl::queue& q, const FunctorWrapper& functor_wrapper,
+                     pointer_type global_mem, std::size_t size) const {
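+    // Strategy: scan each workgroup's slice of the `size` values
+    // independently, storing each workgroup's total in group_results;
+    // recursively scan group_results the same way; finally join each
+    // workgroup's offset back into its elements. For example, size = 1000
+    // with wgroup_size = 128 gives 8 workgroups on the first level, whose 8
+    // totals are scanned by a single workgroup in the recursive call.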
+    // FIXME_SYCL optimize
+    constexpr size_t wgroup_size = 128;
+    auto n_wgroups               = (size + wgroup_size - 1) / wgroup_size;
+    pointer_type group_results   = global_mem + n_wgroups * wgroup_size;
+
+    auto local_scans = q.submit([&](sycl::handler& cgh) {
+      // Store subgroup totals
+      const auto min_subgroup_size =
+          q.get_device()
+              .template get_info<sycl::info::device::sub_group_sizes>()
+              .front();
+      sycl::accessor<value_type, 1, sycl::access::mode::read_write,
+                     sycl::access::target::local>
+          local_mem(sycl::range<1>((wgroup_size + min_subgroup_size - 1) /
+                                   min_subgroup_size),
+                    cgh);
+
+      cgh.parallel_for(
+          sycl::nd_range<1>(n_wgroups * wgroup_size, wgroup_size),
+          [=](sycl::nd_item<1> item) {
+            const FunctorType& functor = functor_wrapper.get_functor();
+            typename Analysis::Reducer final_reducer(&functor);
+
+            const auto local_id  = item.get_local_linear_id();
+            const auto global_id = item.get_global_linear_id();
+
+            // Initialize local memory
+            value_type local_value;
+            if (global_id < size)
+              local_value = global_mem[global_id];
+            else
+              final_reducer.init(&local_value);
+
+            workgroup_scan<>(item, final_reducer, local_mem.get_pointer(),
+                             local_value, wgroup_size);
+
+            if (n_wgroups > 1 && local_id == wgroup_size - 1)
+              group_results[item.get_group_linear_id()] =
+                  local_mem[item.get_sub_group().get_group_range()[0] - 1];
+
+            // Write results to global memory
+            if (global_id < size) global_mem[global_id] = local_value;
+          });
+    });
+    q.ext_oneapi_submit_barrier(std::vector<sycl::event>{local_scans});
+
+    if (n_wgroups > 1) {
+      scan_internal(q, functor_wrapper, group_results, n_wgroups);
+      auto update_with_group_results = q.submit([&](sycl::handler& cgh) {
+        cgh.parallel_for(
+            sycl::nd_range<1>(n_wgroups * wgroup_size, wgroup_size),
+            [=](sycl::nd_item<1> item) {
+              const auto global_id       = item.get_global_linear_id();
+              const FunctorType& functor = functor_wrapper.get_functor();
+              typename Analysis::Reducer final_reducer(&functor);
+              if (global_id < size)
+                final_reducer.join(&global_mem[global_id],
+                                   &group_results[item.get_group_linear_id()]);
+            });
+      });
+      q.ext_oneapi_submit_barrier(
+          std::vector<sycl::event>{update_with_group_results});
+    }
+  }
+
+  template <typename FunctorWrapper>
+  sycl::event sycl_direct_launch(const FunctorWrapper& functor_wrapper,
+                                 sycl::event memcpy_event) const {
+    // Convenience references
+    const Kokkos::Experimental::SYCL& space = m_policy.space();
+    sycl::queue& q                          = space.sycl_queue();
+
+    const std::size_t len = m_policy.end() - m_policy.begin();
+
+    // Initialize global memory
+    auto initialize_global_memory = q.submit([&](sycl::handler& cgh) {
+      auto global_mem = m_scratch_space;
+      auto begin      = m_policy.begin();
+
+      cgh.depends_on(memcpy_event);
+      cgh.parallel_for(sycl::range<1>(len), [=](sycl::item<1> item) {
+        const typename Policy::index_type id =
+            static_cast<typename Policy::index_type>(item.get_id()) + begin;
+        const FunctorType& functor = functor_wrapper.get_functor();
+        typename Analysis::Reducer final_reducer(&functor);
+
+        value_type update{};
+        final_reducer.init(&update);
+        if constexpr (std::is_void<WorkTag>::value)
+          functor_wrapper.get_functor()(id, update, false);
+        else
+          functor_wrapper.get_functor()(WorkTag(), id, update, false);
+        global_mem[id] = update;
+      });
+    });
+    q.ext_oneapi_submit_barrier(
+        std::vector<sycl::event>{initialize_global_memory});
+
+    // Perform the actual exclusive scan
+    scan_internal(q, functor_wrapper, m_scratch_space, len);
+
+    // Write results to global memory
+    auto update_global_results = q.submit([&](sycl::handler& cgh) {
+      auto global_mem = m_scratch_space;
+      cgh.parallel_for(sycl::range<1>(len), [=](sycl::item<1> item) {
+        auto global_id = item.get_id(0);
+
+        value_type update = global_mem[global_id];
+        if constexpr (std::is_void<WorkTag>::value)
+          functor_wrapper.get_functor()(global_id, update, true);
+        else
+          functor_wrapper.get_functor()(WorkTag(), global_id, update, true);
+        global_mem[global_id] = update;
+      });
+    });
+    q.ext_oneapi_submit_barrier(
+        std::vector<sycl::event>{update_global_results});
+    return update_global_results;
+  }
+
+ public:
+  template <typename PostFunctor>
+  void impl_execute(const PostFunctor& post_functor) {
+    if (m_policy.begin() == m_policy.end()) return;
+
+    auto& instance        = *m_policy.space().impl_internal_space_instance();
+    const std::size_t len = m_policy.end() - m_policy.begin();
+
+    // Compute the total amount of memory we will need. We emulate the recursive
+    // structure that is used to do the actual scan. Essentially, we need to
+    // allocate memory for the whole range and then recursively for the reduced
+    // group results until only one group is left.
+    std::size_t total_memory = 0;
+    {
+      size_t wgroup_size   = 128;
+      size_t n_nested_size = len;
+      size_t n_nested_wgroups;
+      do {
+        n_nested_wgroups = (n_nested_size + wgroup_size - 1) / wgroup_size;
+        n_nested_size    = n_nested_wgroups;
+        total_memory += sizeof(value_type) * n_nested_wgroups * wgroup_size;
+      } while (n_nested_wgroups > 1);
+      total_memory += sizeof(value_type) * wgroup_size;
+    }
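+    // For example, len = 1000 with wgroup_size = 128 gives 8 workgroups
+    // (1024 padded values) on the first level and 1 workgroup (128 values) on
+    // the second; together with the final 128 values added after the loop,
+    // total_memory = (1024 + 128 + 128) * sizeof(value_type).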
+
+    // FIXME_SYCL consider only storing one value per block and recreate initial
+    // results in the end before doing the final pass
+    m_scratch_space = static_cast<sycl::device_ptr<value_type>>(
+        instance.scratch_space(total_memory));
+
+    Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem&
+        indirectKernelMem = instance.get_indirect_kernel_mem();
+
+    auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
+        m_functor, indirectKernelMem);
+
+    sycl::event event =
+        sycl_direct_launch(functor_wrapper, functor_wrapper.get_copy_event());
+    functor_wrapper.register_event(event);
+    post_functor();
+  }
+
+  ParallelScanSYCLBase(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_shared_memory_lock(m_policy.space()
+                                 .impl_internal_space_instance()
+                                 ->m_mutexScratchSpace) {}
+};
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+                   Kokkos::Experimental::SYCL>
+    : private ParallelScanSYCLBase<FunctorType, Traits...> {
+ public:
+  using Base = ParallelScanSYCLBase<FunctorType, Traits...>;
+
+  inline void execute() {
+    Base::impl_execute([]() {});
+  }
+
+  ParallelScan(const FunctorType& arg_functor,
+               const typename Base::Policy& arg_policy)
+      : Base(arg_functor, arg_policy) {}
+};
+
+//----------------------------------------------------------------------------
+
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+                            ReturnType, Kokkos::Experimental::SYCL>
+    : private ParallelScanSYCLBase<FunctorType, Traits...> {
+ public:
+  using Base = ParallelScanSYCLBase<FunctorType, Traits...>;
+
+  ReturnType& m_returnvalue;
+  const Kokkos::Experimental::SYCL& m_exec;
+
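+  // After the scan, the last element of the scratch space holds the grand
+  // total, which execute() copies back into m_returnvalue.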
+  inline void execute() {
+    Base::impl_execute([&]() {
+      const long long nwork = Base::m_policy.end() - Base::m_policy.begin();
+      if (nwork > 0) {
+        const int size = Base::Analysis::value_size(Base::m_functor);
+        DeepCopy<HostSpace, Kokkos::Experimental::SYCLDeviceUSMSpace,
+                 Kokkos::Experimental::SYCL>(
+            m_exec, &m_returnvalue, Base::m_scratch_space + nwork - 1, size);
+      }
+    });
+  }
+
+  ParallelScanWithTotal(const FunctorType& arg_functor,
+                        const typename Base::Policy& arg_policy,
+                        ReturnType& arg_returnvalue)
+      : Base(arg_functor, arg_policy),
+        m_returnvalue(arg_returnvalue),
+        m_exec(arg_policy.space()) {}
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Parallel_Team.hpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Parallel_Team.hpp
new file mode 100644 (file)
index 0000000..5ac7d8a
--- /dev/null
@@ -0,0 +1,982 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SYCL_PARALLEL_TEAM_HPP
+#define KOKKOS_SYCL_PARALLEL_TEAM_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <SYCL/Kokkos_SYCL_Parallel_Reduce.hpp>  // workgroup_reduction
+#include <SYCL/Kokkos_SYCL_Team.hpp>
+
+#include <vector>
+
+namespace Kokkos {
+namespace Impl {
+template <typename... Properties>
+class TeamPolicyInternal<Kokkos::Experimental::SYCL, Properties...>
+    : public PolicyTraits<Properties...> {
+ public:
+  using execution_policy = TeamPolicyInternal;
+
+  using traits = PolicyTraits<Properties...>;
+
+  template <typename ExecSpace, typename... OtherProperties>
+  friend class TeamPolicyInternal;
+
+ private:
+  typename traits::execution_space m_space;
+  int m_league_size;
+  int m_team_size;
+  int m_vector_length;
+  size_t m_team_scratch_size[2];
+  size_t m_thread_scratch_size[2];
+  int m_chunk_size;
+  bool m_tune_team_size;
+  bool m_tune_vector_length;
+
+ public:
+  using execution_space = Kokkos::Experimental::SYCL;
+
+  template <class... OtherProperties>
+  TeamPolicyInternal(TeamPolicyInternal<OtherProperties...> const& p) {
+    m_league_size            = p.m_league_size;
+    m_team_size              = p.m_team_size;
+    m_vector_length          = p.m_vector_length;
+    m_team_scratch_size[0]   = p.m_team_scratch_size[0];
+    m_team_scratch_size[1]   = p.m_team_scratch_size[1];
+    m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
+    m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
+    m_chunk_size             = p.m_chunk_size;
+    m_space                  = p.m_space;
+    m_tune_team_size         = p.m_tune_team_size;
+    m_tune_vector_length     = p.m_tune_vector_length;
+  }
+
+  template <typename FunctorType>
+  int team_size_max(FunctorType const& f, ParallelForTag const&) const {
+    return internal_team_size_max_for(f);
+  }
+
+  template <class FunctorType>
+  inline int team_size_max(const FunctorType& f,
+                           const ParallelReduceTag&) const {
+    return internal_team_size_max_reduce(f);
+  }
+
+  template <class FunctorType, class ReducerType>
+  inline int team_size_max(const FunctorType& f, const ReducerType& /*r*/,
+                           const ParallelReduceTag&) const {
+    return internal_team_size_max_reduce(f);
+  }
+
+  template <typename FunctorType>
+  int team_size_recommended(FunctorType const& f, ParallelForTag const&) const {
+    return internal_team_size_recommended_for(f);
+  }
+
+  template <typename FunctorType>
+  inline int team_size_recommended(FunctorType const& f,
+                                   ParallelReduceTag const&) const {
+    return internal_team_size_recommended_reduce(f);
+  }
+
+  template <class FunctorType, class ReducerType>
+  int team_size_recommended(FunctorType const& f, ReducerType const&,
+                            ParallelReduceTag const&) const {
+    return internal_team_size_recommended_reduce(f);
+  }
+  inline bool impl_auto_vector_length() const { return m_tune_vector_length; }
+  inline bool impl_auto_team_size() const { return m_tune_team_size; }
+  // FIXME_SYCL This is correct in most cases, but not necessarily in case a
+  // custom sycl::queue is used to initialize the execution space.
+  static int vector_length_max() {
+    std::vector<size_t> sub_group_sizes =
+        execution_space{}
+            .impl_internal_space_instance()
+            ->m_queue->get_device()
+            .template get_info<sycl::info::device::sub_group_sizes>();
+    return *std::max_element(sub_group_sizes.begin(), sub_group_sizes.end());
+  }
+
+ private:
+  static int verify_requested_vector_length(int requested_vector_length) {
+    int test_vector_length =
+        std::min(requested_vector_length, vector_length_max());
+
+    // Allow only power-of-two vector_length
+    if (!(is_integral_power_of_two(test_vector_length))) {
+      int test_pow2 = 1;
+      while (test_pow2 < test_vector_length) test_pow2 <<= 1;
+      test_vector_length = test_pow2 >> 1;
+    }
+
+    return test_vector_length;
+  }
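+  // For example, on a device whose maximum subgroup size is 32, a requested
+  // vector length of 6 is first capped at 32 and then rounded down to the
+  // nearest power of two, yielding 4.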
+
+ public:
+  static int scratch_size_max(int level) {
+    // FIXME_SYCL arbitrarily setting the limit to 32kB for level 0 and 20MB
+    // for level 1
+    return level == 0 ? 1024 * 32 : 20 * 1024 * 1024;
+  }
+  inline void impl_set_vector_length(size_t size) { m_vector_length = size; }
+  inline void impl_set_team_size(size_t size) { m_team_size = size; }
+  int impl_vector_length() const { return m_vector_length; }
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  KOKKOS_DEPRECATED int vector_length() const { return impl_vector_length(); }
+#endif
+
+  int team_size() const { return m_team_size; }
+
+  int league_size() const { return m_league_size; }
+
+  size_t scratch_size(int level, int team_size_ = -1) const {
+    if (team_size_ < 0) team_size_ = m_team_size;
+    return m_team_scratch_size[level] +
+           team_size_ * m_thread_scratch_size[level];
+  }
+
+  size_t team_scratch_size(int level) const {
+    return m_team_scratch_size[level];
+  }
+
+  size_t thread_scratch_size(int level) const {
+    return m_thread_scratch_size[level];
+  }
+
+  typename traits::execution_space space() const { return m_space; }
+
+  TeamPolicyInternal()
+      : m_space(typename traits::execution_space()),
+        m_league_size(0),
+        m_team_size(-1),
+        m_vector_length(0),
+        m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(vector_length_max()),
+        m_tune_team_size(false),
+        m_tune_vector_length(false) {}
+
+  /** \brief  Specify league size, request team size */
+  TeamPolicyInternal(const execution_space space_, int league_size_,
+                     int team_size_request, int vector_length_request = 1)
+      : m_space(space_),
+        m_league_size(league_size_),
+        m_team_size(team_size_request),
+        m_vector_length(
+            (vector_length_request > 0)
+                ? verify_requested_vector_length(vector_length_request)
+                : (verify_requested_vector_length(1))),
+        m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(vector_length_max()),
+        m_tune_team_size(bool(team_size_request <= 0)),
+        m_tune_vector_length(bool(vector_length_request <= 0)) {
+    // FIXME_SYCL Check that league size is permissible,
+    // https://github.com/intel/llvm/pull/4064
+
+    // Make sure total block size is permissible
+    if (m_team_size * m_vector_length >
+        static_cast<int>(
+            m_space.impl_internal_space_instance()->m_maxWorkgroupSize)) {
+      Impl::throw_runtime_exception(
+          std::string("Kokkos::TeamPolicy<SYCL> the team size is too large. "
+                      "Team size x vector length is " +
+                      std::to_string(m_team_size * m_vector_length) +
+                      " but must not be larger than ") +
+          std::to_string(
+              m_space.impl_internal_space_instance()->m_maxWorkgroupSize));
+    }
+  }
+
+  /** \brief  Specify league size, request team size */
+  TeamPolicyInternal(const execution_space space_, int league_size_,
+                     const Kokkos::AUTO_t& /* team_size_request */,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(space_, league_size_, -1, vector_length_request) {}
+  /** \brief  Specify league size and team size, request vector length */
+  TeamPolicyInternal(const execution_space space_, int league_size_,
+                     int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(space_, league_size_, team_size_request, -1) {}
+
+  /** \brief  Specify league size, request team size and vector length */
+  TeamPolicyInternal(const execution_space space_, int league_size_,
+                     const Kokkos::AUTO_t& /* team_size_request */,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(space_, league_size_, -1, -1) {}
+
+  TeamPolicyInternal(int league_size_, int team_size_request,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+                           team_size_request, vector_length_request) {}
+
+  TeamPolicyInternal(int league_size_,
+                     const Kokkos::AUTO_t& /* team_size_request */,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_, -1,
+                           vector_length_request) {}
+
+  /** \brief  Specify league size and team size, request vector length */
+  TeamPolicyInternal(int league_size_, int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_,
+                           team_size_request, -1) {}
+
+  /** \brief  Specify league size, request team size and vector length */
+  TeamPolicyInternal(int league_size_,
+                     const Kokkos::AUTO_t& /* team_size_request */,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(typename traits::execution_space(), league_size_, -1,
+                           -1) {}
+
+  int chunk_size() const { return m_chunk_size; }
+
+  TeamPolicyInternal& set_chunk_size(typename traits::index_type chunk_size_) {
+    m_chunk_size = chunk_size_;
+    return *this;
+  }
+
+  /** \brief set per team scratch size for a specific level of the scratch
+   * hierarchy */
+  TeamPolicyInternal& set_scratch_size(int level,
+                                       PerTeamValue const& per_team) {
+    m_team_scratch_size[level] = per_team.value;
+    return *this;
+  }
+
+  /** \brief set per thread scratch size for a specific level of the scratch
+   * hierarchy */
+  TeamPolicyInternal& set_scratch_size(int level,
+                                       PerThreadValue const& per_thread) {
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+  /** \brief set per thread and per team scratch size for a specific level of
+   * the scratch hierarchy */
+  TeamPolicyInternal& set_scratch_size(int level, PerTeamValue const& per_team,
+                                       PerThreadValue const& per_thread) {
+    m_team_scratch_size[level]   = per_team.value;
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+  using member_type = Kokkos::Impl::SYCLTeamMember;
+
+ protected:
+  template <class FunctorType>
+  int internal_team_size_max_for(const FunctorType& /*f*/) const {
+    // nested_reducer_memsize = sizeof(double) * (m_team_size + 2)
+    // custom: m_team_scratch_size[0] + m_thread_scratch_size[0] * m_team_size
+    // total:
+    // 2 * sizeof(double) + m_team_scratch_size[0]
+    // + m_team_size * (sizeof(double) + m_thread_scratch_size[0])
+    const int max_threads_for_memory =
+        (space().impl_internal_space_instance()->m_maxShmemPerBlock -
+         2 * sizeof(double) - m_team_scratch_size[0]) /
+        (sizeof(double) + m_thread_scratch_size[0]);
+    return std::min({
+             int(m_space.impl_internal_space_instance()->m_maxWorkgroupSize),
+      // FIXME_SYCL Avoid requesting too many registers on NVIDIA GPUs.
+#if defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_MAXWELL) || \
+    defined(KOKKOS_ARCH_PASCAL) || defined(KOKKOS_ARCH_VOLTA) ||   \
+    defined(KOKKOS_ARCH_TURING75) || defined(KOKKOS_ARCH_AMPERE)
+                 256,
+#endif
+                 max_threads_for_memory
+           }) /
+           impl_vector_length();
+  }
+
+  template <class FunctorType>
+  int internal_team_size_max_reduce(const FunctorType& f) const {
+    using Analysis        = FunctorAnalysis<FunctorPatternInterface::REDUCE,
+                                     TeamPolicyInternal, FunctorType>;
+    using value_type      = typename Analysis::value_type;
+    const int value_count = Analysis::value_count(f);
+
+    // nested_reducer_memsize = sizeof(double) * (m_team_size + 2)
+    // reducer_memsize = sizeof(value_type) * m_team_size * value_count
+    // custom: m_team_scratch_size[0] + m_thread_scratch_size[0] * m_team_size
+    // total:
+    // 2 * sizeof(double) + m_team_scratch_size[0]
+    // + m_team_size * (sizeof(double) + sizeof(value_type) * value_count
+    //                  + m_thread_scratch_size[0])
+    const int max_threads_for_memory =
+        (space().impl_internal_space_instance()->m_maxShmemPerBlock -
+         2 * sizeof(double) - m_team_scratch_size[0]) /
+        (sizeof(double) + sizeof(value_type) * value_count +
+         m_thread_scratch_size[0]);
+    return std::min<int>({
+             int(m_space.impl_internal_space_instance()->m_maxWorkgroupSize),
+      // FIXME_SYCL Avoid requesting too many registers on NVIDIA GPUs.
+#if defined(KOKKOS_ARCH_KEPLER) || defined(KOKKOS_ARCH_MAXWELL) || \
+    defined(KOKKOS_ARCH_PASCAL) || defined(KOKKOS_ARCH_VOLTA) ||   \
+    defined(KOKKOS_ARCH_TURING75) || defined(KOKKOS_ARCH_AMPERE)
+                 256,
+#endif
+                 max_threads_for_memory
+           }) /
+           impl_vector_length();
+  }
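+  // For example, with m_maxShmemPerBlock = 64 KiB, no user-requested scratch,
+  // a 4-byte value_type with value_count = 1, and vector length 1, the
+  // formula above allows (65536 - 16) / (8 + 4) = 5460 threads per team, so
+  // the maximum workgroup size is typically the binding constraint instead.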
+
+  template <class FunctorType>
+  int internal_team_size_recommended_for(const FunctorType& f) const {
+    // FIXME_SYCL improve
+    return 1 << Kokkos::Impl::int_log2(internal_team_size_max_for(f));
+  }
+
+  template <class FunctorType>
+  int internal_team_size_recommended_reduce(const FunctorType& f) const {
+    // FIXME_SYCL improve
+    return 1 << Kokkos::Impl::int_log2(internal_team_size_max_reduce(f));
+  }
+};
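+
+// A minimal usage sketch (hypothetical user code; `f` denotes a functor
+// taking a member_type argument):
+//
+//   Kokkos::TeamPolicy<Kokkos::Experimental::SYCL> policy(league_size,
+//                                                         Kokkos::AUTO);
+//   Kokkos::parallel_for("some_label", policy, f);
+//
+// With Kokkos::AUTO, the team size is chosen through the
+// team_size_recommended() queries implemented above.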
+
+template <typename FunctorType, typename... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                  Kokkos::Experimental::SYCL> {
+ public:
+  using Policy = TeamPolicyInternal<Kokkos::Experimental::SYCL, Properties...>;
+  using functor_type = FunctorType;
+  using size_type    = ::Kokkos::Experimental::SYCL::size_type;
+
+ private:
+  using member_type   = typename Policy::member_type;
+  using work_tag      = typename Policy::work_tag;
+  using launch_bounds = typename Policy::launch_bounds;
+
+  FunctorType const m_functor;
+  Policy const m_policy;
+  size_type const m_league_size;
+  int m_team_size;
+  size_type const m_vector_size;
+  int m_shmem_begin;
+  int m_shmem_size;
+  sycl::device_ptr<char> m_global_scratch_ptr;
+  size_t m_scratch_size[2];
+  // Only let one ParallelFor/Reduce modify the team scratch memory. The
+  // constructor acquires the mutex which is released in the destructor.
+  std::scoped_lock<std::mutex> m_scratch_lock;
+
+  template <typename FunctorWrapper>
+  sycl::event sycl_direct_launch(const Policy& policy,
+                                 const FunctorWrapper& functor_wrapper,
+                                 const sycl::event& memcpy_events) const {
+    // Convenience references
+    const Kokkos::Experimental::SYCL& space = policy.space();
+    sycl::queue& q                          = space.sycl_queue();
+
+    auto parallel_for_event = q.submit([&](sycl::handler& cgh) {
+      // FIXME_SYCL accessors seem to need a size greater than zero at least for
+      // host queues
+      sycl::accessor<char, 1, sycl::access::mode::read_write,
+                     sycl::access::target::local>
+          team_scratch_memory_L0(
+              sycl::range<1>(
+                  std::max(m_scratch_size[0] + m_shmem_begin, size_t(1))),
+              cgh);
+
+      // Avoid capturing *this since it might not be trivially copyable
+      const auto shmem_begin       = m_shmem_begin;
+      const size_t scratch_size[2] = {m_scratch_size[0], m_scratch_size[1]};
+      sycl::device_ptr<char> const global_scratch_ptr = m_global_scratch_ptr;
+
+      auto lambda = [=](sycl::nd_item<2> item) {
+        const member_type team_member(
+            team_scratch_memory_L0.get_pointer(), shmem_begin, scratch_size[0],
+            global_scratch_ptr + item.get_group(1) * scratch_size[1],
+            scratch_size[1], item);
+        if constexpr (std::is_void<work_tag>::value)
+          functor_wrapper.get_functor()(team_member);
+        else
+          functor_wrapper.get_functor()(work_tag(), team_member);
+      };
+
+      static sycl::kernel kernel = [&] {
+        sycl::kernel_id functor_kernel_id =
+            sycl::get_kernel_id<decltype(lambda)>();
+        auto kernel_bundle =
+            sycl::get_kernel_bundle<sycl::bundle_state::executable>(
+                q.get_context(), std::vector{functor_kernel_id});
+        return kernel_bundle.get_kernel(functor_kernel_id);
+      }();
+      auto max_sg_size =
+          kernel
+              .get_info<sycl::info::kernel_device_specific::max_sub_group_size>(
+                  q.get_device(),
+                  sycl::range<3>(m_team_size, m_vector_size, 1));
+      auto final_vector_size = std::min<int>(m_vector_size, max_sg_size);
+      // FIXME_SYCL For some reason, explicitly enforcing the kernel bundle to
+      // be used gives a runtime error.
+      // cgh.use_kernel_bundle(kernel_bundle);
+
+      cgh.depends_on(memcpy_events);
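+      // The 2D nd_range launches one work-group per team: local dimension 0
+      // spans the team_size threads and local dimension 1 the vector lanes
+      // (the fastest-varying dimension, so the lanes of a thread map to a
+      // subgroup), while the league index runs along group dimension 1.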
+      cgh.parallel_for(
+          sycl::nd_range<2>(
+              sycl::range<2>(m_team_size, m_league_size * final_vector_size),
+              sycl::range<2>(m_team_size, final_vector_size)),
+          lambda);
+    });
+    q.ext_oneapi_submit_barrier(std::vector<sycl::event>{parallel_for_event});
+    return parallel_for_event;
+  }
+
+ public:
+  inline void execute() const {
+    if (m_league_size == 0) return;
+
+    Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem&
+        indirectKernelMem = m_policy.space()
+                                .impl_internal_space_instance()
+                                ->get_indirect_kernel_mem();
+
+    auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
+        m_functor, indirectKernelMem);
+
+    sycl::event event = sycl_direct_launch(m_policy, functor_wrapper,
+                                           functor_wrapper.get_copy_event());
+    functor_wrapper.register_event(event);
+  }
+
+  ParallelFor(FunctorType const& arg_functor, Policy const& arg_policy)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_league_size(arg_policy.league_size()),
+        m_team_size(arg_policy.team_size()),
+        m_vector_size(arg_policy.impl_vector_length()),
+        m_scratch_lock(arg_policy.space()
+                           .impl_internal_space_instance()
+                           ->m_team_scratch_mutex) {
+    // FIXME_SYCL optimize
+    if (m_team_size < 0)
+      m_team_size =
+          m_policy.team_size_recommended(arg_functor, ParallelForTag{});
+
+    m_shmem_begin = (sizeof(double) * (m_team_size + 2));
+    m_shmem_size =
+        (m_policy.scratch_size(0, m_team_size) +
+         FunctorTeamShmemSize<FunctorType>::value(m_functor, m_team_size));
+    m_scratch_size[0] = m_shmem_size;
+    m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+
+    // Functor's reduce memory, team scan memory, and team shared memory depend
+    // upon team size.
+    auto& space = *m_policy.space().impl_internal_space_instance();
+    m_global_scratch_ptr =
+        static_cast<sycl::device_ptr<char>>(space.resize_team_scratch_space(
+            static_cast<ptrdiff_t>(m_scratch_size[1]) * m_league_size));
+
+    if (static_cast<int>(space.m_maxShmemPerBlock) <
+        m_shmem_size - m_shmem_begin) {
+      std::stringstream out;
+      out << "Kokkos::Impl::ParallelFor<SYCL> insufficient shared memory! "
+             "Requested "
+          << m_shmem_size - m_shmem_begin << " bytes but maximum is "
+          << space.m_maxShmemPerBlock << '\n';
+      Kokkos::Impl::throw_runtime_exception(out.str());
+    }
+
+    const auto max_team_size =
+        m_policy.team_size_max(arg_functor, ParallelForTag{});
+    if (m_team_size > max_team_size)
+      Kokkos::Impl::throw_runtime_exception(
+          "Kokkos::Impl::ParallelFor<SYCL> requested too large team size. The "
+          "maximal team_size is " +
+          std::to_string(max_team_size) + '!');
+  }
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class FunctorType, class ReducerType, class... Properties>
+class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                     ReducerType, Kokkos::Experimental::SYCL> {
+ public:
+  using Policy = TeamPolicyInternal<Kokkos::Experimental::SYCL, Properties...>;
+
+ private:
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
+  using member_type   = typename Policy::member_type;
+  using WorkTag       = typename Policy::work_tag;
+  using launch_bounds = typename Policy::launch_bounds;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+  using value_type     = typename Analysis::value_type;
+
+ public:
+  using functor_type = FunctorType;
+  using size_type    = Kokkos::Experimental::SYCL::size_type;
+
+ private:
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  const bool m_result_ptr_device_accessible;
+  size_type m_shmem_begin;
+  size_type m_shmem_size;
+  sycl::device_ptr<char> m_global_scratch_ptr;
+  size_t m_scratch_size[2];
+  const size_type m_league_size;
+  int m_team_size;
+  const size_type m_vector_size;
+  // Only let one ParallelFor/Reduce modify the team scratch memory. The
+  // constructor acquires the mutex which is released in the destructor.
+  std::scoped_lock<std::mutex> m_scratch_lock;
+
+  template <typename PolicyType, typename FunctorWrapper,
+            typename ReducerWrapper>
+  sycl::event sycl_direct_launch(
+      const PolicyType& policy, const FunctorWrapper& functor_wrapper,
+      const ReducerWrapper& reducer_wrapper,
+      const std::vector<sycl::event>& memcpy_events) const {
+    // Convenience references
+    const Kokkos::Experimental::SYCL& space = policy.space();
+    Kokkos::Experimental::Impl::SYCLInternal& instance =
+        *space.impl_internal_space_instance();
+    sycl::queue& q = space.sycl_queue();
+
+    const unsigned int value_count =
+        Analysis::value_count(ReducerConditional::select(m_functor, m_reducer));
+    std::size_t size = std::size_t(m_league_size) * m_team_size * m_vector_size;
+    value_type* results_ptr = nullptr;
+
+    sycl::event last_reduction_event;
+
+    // If size <= 1, we call init(), invoke the functor at most once, and call
+    // final() once, working on the global scratch memory, but we don't copy
+    // back to m_result_ptr yet.
+    if (size <= 1) {
+      results_ptr =
+          static_cast<sycl::device_ptr<value_type>>(instance.scratch_space(
+              sizeof(value_type) * std::max(value_count, 1u)));
+      sycl::global_ptr<value_type> device_accessible_result_ptr =
+          m_result_ptr_device_accessible ? m_result_ptr : nullptr;
+
+      auto parallel_reduce_event = q.submit([&](sycl::handler& cgh) {
+        // FIXME_SYCL accessors seem to need a size greater than zero at least
+        // for host queues
+        sycl::accessor<char, 1, sycl::access::mode::read_write,
+                       sycl::access::target::local>
+            team_scratch_memory_L0(
+                sycl::range<1>(
+                    std::max(m_scratch_size[0] + m_shmem_begin, size_t(1))),
+                cgh);
+
+        // Avoid capturing *this since it might not be trivially copyable
+        const auto shmem_begin       = m_shmem_begin;
+        const size_t scratch_size[2] = {m_scratch_size[0], m_scratch_size[1]};
+        sycl::device_ptr<char> const global_scratch_ptr = m_global_scratch_ptr;
+
+        cgh.depends_on(memcpy_events);
+        cgh.parallel_for(
+            sycl::nd_range<2>(sycl::range<2>(1, 1), sycl::range<2>(1, 1)),
+            [=](sycl::nd_item<2> item) {
+              const auto& functor          = functor_wrapper.get_functor();
+              const auto& selected_reducer = ReducerConditional::select(
+                  static_cast<const FunctorType&>(functor),
+                  static_cast<const ReducerType&>(
+                      reducer_wrapper.get_functor()));
+              typename Analysis::Reducer final_reducer(&selected_reducer);
+
+              reference_type update = final_reducer.init(results_ptr);
+              if (size == 1) {
+                const member_type team_member(
+                    team_scratch_memory_L0.get_pointer(), shmem_begin,
+                    scratch_size[0], global_scratch_ptr, scratch_size[1], item);
+                if constexpr (std::is_void<WorkTag>::value)
+                  functor(team_member, update);
+                else
+                  functor(WorkTag(), team_member, update);
+              }
+              final_reducer.final(results_ptr);
+              if (device_accessible_result_ptr)
+                final_reducer.copy(device_accessible_result_ptr,
+                                   &results_ptr[0]);
+            });
+      });
+      q.ext_oneapi_submit_barrier(
+          std::vector<sycl::event>{parallel_reduce_event});
+      last_reduction_event = parallel_reduce_event;
+    } else {
+      // Otherwise (when the total range has more than one element), each
+      // workgroup reduces its values separately and writes its partial result
+      // back to global memory; the last workgroup to finish then combines the
+      // partial results of all workgroups into the final value.
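+      // For example, with 10 workgroups, the counter behind scratch_flags
+      // (allocated below) is atomically incremented once per finished
+      // workgroup; the workgroup that observes the value 10 knows that all
+      // partial results have been written to results_ptr and performs the
+      // final reduction.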
+      auto parallel_reduce_event = q.submit([&](sycl::handler& cgh) {
+        auto scratch_flags = static_cast<sycl::device_ptr<unsigned int>>(
+            instance.scratch_flags(sizeof(unsigned int)));
+
+        // FIXME_SYCL accessors seem to need a size greater than zero at least
+        // for host queues
+        sycl::accessor<char, 1, sycl::access::mode::read_write,
+                       sycl::access::target::local>
+            team_scratch_memory_L0(
+                sycl::range<1>(
+                    std::max(m_scratch_size[0] + m_shmem_begin, size_t(1))),
+                cgh);
+
+        // Avoid capturing *this since it might not be trivially copyable
+        const auto shmem_begin       = m_shmem_begin;
+        const size_t scratch_size[2] = {m_scratch_size[0], m_scratch_size[1]};
+        sycl::device_ptr<char> const global_scratch_ptr = m_global_scratch_ptr;
+
+        auto team_reduction_factory =
+            [&](sycl::accessor<value_type, 1, sycl::access::mode::read_write,
+                               sycl::access::target::local>
+                    local_mem,
+                sycl::device_ptr<value_type> results_ptr) mutable {
+              sycl::global_ptr<value_type> device_accessible_result_ptr =
+                  m_result_ptr_device_accessible ? m_result_ptr : nullptr;
+              auto lambda = [=](sycl::nd_item<2> item) {
+                auto n_wgroups =
+                    item.get_group_range()[0] * item.get_group_range()[1];
+                auto wgroup_size =
+                    item.get_local_range()[0] * item.get_local_range()[1];
+                auto size = n_wgroups * wgroup_size;
+
+                auto& num_teams_done = reinterpret_cast<unsigned int&>(
+                    local_mem[wgroup_size * std::max(value_count, 1u)]);
+                const auto local_id          = item.get_local_linear_id();
+                const auto& functor          = functor_wrapper.get_functor();
+                const auto& selected_reducer = ReducerConditional::select(
+                    static_cast<const FunctorType&>(functor),
+                    static_cast<const ReducerType&>(
+                        reducer_wrapper.get_functor()));
+                typename Analysis::Reducer final_reducer(&selected_reducer);
+
+                if constexpr (Analysis::StaticValueSize == 0) {
+                  reference_type update =
+                      final_reducer.init(&local_mem[local_id * value_count]);
+                  const member_type team_member(
+                      team_scratch_memory_L0.get_pointer(), shmem_begin,
+                      scratch_size[0],
+                      global_scratch_ptr + item.get_group(1) * scratch_size[1],
+                      scratch_size[1], item);
+                  if constexpr (std::is_void<WorkTag>::value)
+                    functor(team_member, update);
+                  else
+                    functor(WorkTag(), team_member, update);
+                  item.barrier(sycl::access::fence_space::local_space);
+
+                  SYCLReduction::workgroup_reduction<>(
+                      item, local_mem.get_pointer(), results_ptr,
+                      device_accessible_result_ptr, value_count,
+                      selected_reducer, false,
+                      std::min<std::size_t>(size,
+                                            item.get_local_range()[0] *
+                                                item.get_local_range()[1]));
+
+                  if (local_id == 0) {
+                    sycl::atomic_ref<unsigned, sycl::memory_order::relaxed,
+                                     sycl::memory_scope::device,
+                                     sycl::access::address_space::global_space>
+                        scratch_flags_ref(*scratch_flags);
+                    num_teams_done = ++scratch_flags_ref;
+                  }
+                  sycl::group_barrier(item.get_group());
+                  if (num_teams_done == n_wgroups) {
+                    if (local_id >= n_wgroups)
+                      final_reducer.init(&local_mem[local_id * value_count]);
+                    else {
+                      final_reducer.copy(&local_mem[local_id * value_count],
+                                         &results_ptr[local_id * value_count]);
+                      for (unsigned int id = local_id + wgroup_size;
+                           id < n_wgroups; id += wgroup_size) {
+                        final_reducer.join(&local_mem[local_id * value_count],
+                                           &results_ptr[id * value_count]);
+                      }
+                    }
+
+                    SYCLReduction::workgroup_reduction<>(
+                        item, local_mem.get_pointer(), results_ptr,
+                        device_accessible_result_ptr, value_count,
+                        selected_reducer, true,
+                        std::min(n_wgroups, item.get_local_range()[0] *
+                                                item.get_local_range()[1]));
+                  }
+                } else {
+                  value_type local_value;
+                  reference_type update = final_reducer.init(&local_value);
+                  const member_type team_member(
+                      team_scratch_memory_L0.get_pointer(), shmem_begin,
+                      scratch_size[0],
+                      global_scratch_ptr + item.get_group(1) * scratch_size[1],
+                      scratch_size[1], item);
+                  if constexpr (std::is_void<WorkTag>::value)
+                    functor(team_member, update);
+                  else
+                    functor(WorkTag(), team_member, update);
+
+                  SYCLReduction::workgroup_reduction<>(
+                      item, local_mem.get_pointer(), local_value, results_ptr,
+                      device_accessible_result_ptr, final_reducer, false,
+                      std::min<std::size_t>(size,
+                                            item.get_local_range()[0] *
+                                                item.get_local_range()[1]));
+
+                  if (local_id == 0) {
+                    sycl::atomic_ref<unsigned, sycl::memory_order::relaxed,
+                                     sycl::memory_scope::device,
+                                     sycl::access::address_space::global_space>
+                        scratch_flags_ref(*scratch_flags);
+                    num_teams_done = ++scratch_flags_ref;
+                  }
+                  item.barrier(sycl::access::fence_space::local_space);
+                  if (num_teams_done == n_wgroups) {
+                    if (local_id >= n_wgroups)
+                      final_reducer.init(&local_value);
+                    else {
+                      local_value = results_ptr[local_id];
+                      for (unsigned int id = local_id + wgroup_size;
+                           id < n_wgroups; id += wgroup_size) {
+                        final_reducer.join(&local_value, &results_ptr[id]);
+                      }
+                    }
+
+                    SYCLReduction::workgroup_reduction<>(
+                        item, local_mem.get_pointer(), local_value, results_ptr,
+                        device_accessible_result_ptr, final_reducer, true,
+                        std::min(n_wgroups, item.get_local_range()[0] *
+                                                item.get_local_range()[1]));
+                  }
+                }
+              };
+              return lambda;
+            };
+
+        auto dummy_reduction_lambda = team_reduction_factory({1, cgh}, nullptr);
+
+        static sycl::kernel kernel = [&] {
+          sycl::kernel_id functor_kernel_id =
+              sycl::get_kernel_id<decltype(dummy_reduction_lambda)>();
+          auto kernel_bundle =
+              sycl::get_kernel_bundle<sycl::bundle_state::executable>(
+                  q.get_context(), std::vector{functor_kernel_id});
+          return kernel_bundle.get_kernel(functor_kernel_id);
+        }();
+        auto max_sg_size = kernel.get_info<
+            sycl::info::kernel_device_specific::max_sub_group_size>(
+            q.get_device(), sycl::range<3>(m_team_size, m_vector_size, 1));
+        auto final_vector_size = std::min<int>(m_vector_size, max_sg_size);
+        // FIXME_SYCL For some reason, explicitly enforcing the kernel bundle
+        // to be used gives a runtime error.
+        // cgh.use_kernel_bundle(kernel_bundle);
+
+        auto wgroup_size = m_team_size * final_vector_size;
+        std::size_t size = std::size_t(m_league_size) * wgroup_size;
+        sycl::accessor<value_type, 1, sycl::access::mode::read_write,
+                       sycl::access::target::local>
+            local_mem(sycl::range<1>(wgroup_size) * std::max(value_count, 1u) +
+                          (sizeof(unsigned int) + sizeof(value_type) - 1) /
+                              sizeof(value_type),
+                      cgh);
+
+        const auto init_size =
+            std::max<std::size_t>((size + wgroup_size - 1) / wgroup_size, 1);
+        results_ptr =
+            static_cast<sycl::device_ptr<value_type>>(instance.scratch_space(
+                sizeof(value_type) * std::max(value_count, 1u) * init_size));
+
+        auto reduction_lambda = team_reduction_factory(local_mem, results_ptr);
+
+        cgh.depends_on(memcpy_events);
+
+        cgh.parallel_for(
+            sycl::nd_range<2>(
+                sycl::range<2>(m_team_size, m_league_size * m_vector_size),
+                sycl::range<2>(m_team_size, m_vector_size)),
+            reduction_lambda);
+      });
+      last_reduction_event = q.ext_oneapi_submit_barrier(
+          std::vector<sycl::event>{parallel_reduce_event});
+    }
+
+    // At this point, the reduced value is written to the entry in results_ptr
+    // and all that is left is to copy it back to the given result pointer if
+    // necessary.
+    if (m_result_ptr && !m_result_ptr_device_accessible) {
+      Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                             Kokkos::Experimental::SYCLDeviceUSMSpace>(
+          space, m_result_ptr, results_ptr,
+          sizeof(*m_result_ptr) * value_count);
+    }
+
+    return last_reduction_event;
+  }
+
+ public:
+  inline void execute() {
+    Kokkos::Experimental::Impl::SYCLInternal& instance =
+        *m_policy.space().impl_internal_space_instance();
+    using IndirectKernelMem =
+        Kokkos::Experimental::Impl::SYCLInternal::IndirectKernelMem;
+    IndirectKernelMem& indirectKernelMem  = instance.get_indirect_kernel_mem();
+    IndirectKernelMem& indirectReducerMem = instance.get_indirect_kernel_mem();
+
+    auto functor_wrapper = Experimental::Impl::make_sycl_function_wrapper(
+        m_functor, indirectKernelMem);
+    auto reducer_wrapper = Experimental::Impl::make_sycl_function_wrapper(
+        m_reducer, indirectReducerMem);
+
+    sycl::event event = sycl_direct_launch(
+        m_policy, functor_wrapper, reducer_wrapper,
+        {functor_wrapper.get_copy_event(), reducer_wrapper.get_copy_event()});
+    functor_wrapper.register_event(event);
+    reducer_wrapper.register_event(event);
+  }
+
+ private:
+  void initialize() {
+    // FIXME_SYCL optimize
+    if (m_team_size < 0)
+      m_team_size =
+          m_policy.team_size_recommended(m_functor, ParallelReduceTag{});
+    // The team size must be a power of two that is at least two; otherwise,
+    // fall back to the largest power of two smaller than the requested one
+    // (e.g., a requested team size of 24 becomes 16).
+    if ((m_team_size & (m_team_size - 1)) || m_team_size < 2) {
+      int temp_team_size = 2;
+      while ((temp_team_size << 1) < m_team_size) temp_team_size <<= 1;
+      m_team_size = temp_team_size;
+    }
+
+    m_shmem_begin = (sizeof(double) * (m_team_size + 2));
+    m_shmem_size =
+        (m_policy.scratch_size(0, m_team_size) +
+         FunctorTeamShmemSize<FunctorType>::value(m_functor, m_team_size));
+    m_scratch_size[0] = m_shmem_size;
+    m_scratch_size[1] = m_policy.scratch_size(1, m_team_size);
+
+    // Functor's reduce memory, team scan memory, and team shared memory depend
+    // upon team size.
+    auto& space = *m_policy.space().impl_internal_space_instance();
+    m_global_scratch_ptr =
+        static_cast<sycl::device_ptr<char>>(space.resize_team_scratch_space(
+            static_cast<ptrdiff_t>(m_scratch_size[1]) * m_league_size));
+
+    if (static_cast<int>(space.m_maxShmemPerBlock) <
+        m_shmem_size - m_shmem_begin) {
+      std::stringstream out;
+      out << "Kokkos::Impl::ParallelFor<SYCL> insufficient shared memory! "
+             "Requested "
+          << m_shmem_size - m_shmem_begin << " bytes but maximum is "
+          << space.m_maxShmemPerBlock << '\n';
+      Kokkos::Impl::throw_runtime_exception(out.str());
+    }
+
+    if (m_team_size > m_policy.team_size_max(m_functor, ParallelReduceTag{}))
+      Kokkos::Impl::throw_runtime_exception(
+          "Kokkos::Impl::ParallelFor<SYCL> requested too large team size.");
+  }
+
+ public:
+  template <class ViewType>
+  ParallelReduce(
+      FunctorType const& arg_functor, Policy const& arg_policy,
+      ViewType const& arg_result,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value, void*> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result.data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                              typename ViewType::memory_space>::accessible),
+        m_league_size(arg_policy.league_size()),
+        m_team_size(arg_policy.team_size()),
+        m_vector_size(arg_policy.impl_vector_length()),
+        m_scratch_lock(arg_policy.space()
+                           .impl_internal_space_instance()
+                           ->m_team_scratch_mutex) {
+    initialize();
+  }
+
+  ParallelReduce(FunctorType const& arg_functor, Policy const& arg_policy,
+                 ReducerType const& reducer)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_result_ptr_device_accessible(
+            MemorySpaceAccess<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                              typename ReducerType::result_view_type::
+                                  memory_space>::accessible),
+        m_league_size(arg_policy.league_size()),
+        m_team_size(arg_policy.team_size()),
+        m_vector_size(arg_policy.impl_vector_length()),
+        m_scratch_lock(arg_policy.space()
+                           .impl_internal_space_instance()
+                           ->m_team_scratch_mutex) {
+    initialize();
+  }
+};
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Space.cpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Space.cpp
new file mode 100644 (file)
index 0000000..07ca907
--- /dev/null
@@ -0,0 +1,498 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_HostSpace.hpp>
+#include <Kokkos_SYCL.hpp>
+#include <Kokkos_SYCL_Space.hpp>
+#include <SYCL/Kokkos_SYCL_DeepCopy.hpp>
+#include <SYCL/Kokkos_SYCL_Instance.hpp>
+#include <impl/Kokkos_MemorySpace.hpp>
+#include <impl/Kokkos_Profiling.hpp>
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+namespace Kokkos {
+namespace Impl {
+
+void DeepCopySYCL(void* dst, const void* src, size_t n) {
+  Experimental::Impl::SYCLInternal::singleton().m_queue->memcpy(dst, src, n);
+}
+
+void DeepCopyAsyncSYCL(const Kokkos::Experimental::SYCL& instance, void* dst,
+                       const void* src, size_t n) {
+  // FIXME_SYCL memcpy doesn't respect submit_barrier which means that we need
+  // to actually fence the execution space to make sure the memcpy is properly
+  // enqueued when using out-of-order queues.
+  sycl::queue& q = *instance.impl_internal_space_instance()->m_queue;
+  q.wait_and_throw();
+  auto event = q.memcpy(dst, src, n);
+  q.ext_oneapi_submit_barrier(std::vector<sycl::event>{event});
+}
+
+void DeepCopyAsyncSYCL(void* dst, const void* src, size_t n) {
+  Experimental::Impl::SYCLInternal::singleton().m_queue->memcpy(dst, src, n);
+  Experimental::SYCL().fence(
+      "Kokkos::Impl::DeepCopyAsyncSYCL: fence after memcpy");
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Experimental {
+
+SYCLDeviceUSMSpace::SYCLDeviceUSMSpace()
+    : m_queue(*SYCL().impl_internal_space_instance()->m_queue) {}
+SYCLDeviceUSMSpace::SYCLDeviceUSMSpace(sycl::queue queue)
+    : m_queue(std::move(queue)) {}
+
+SYCLSharedUSMSpace::SYCLSharedUSMSpace()
+    : m_queue(*SYCL().impl_internal_space_instance()->m_queue) {}
+SYCLSharedUSMSpace::SYCLSharedUSMSpace(sycl::queue queue)
+    : m_queue(std::move(queue)) {}
+
+SYCLHostUSMSpace::SYCLHostUSMSpace()
+    : m_queue(*SYCL().impl_internal_space_instance()->m_queue) {}
+SYCLHostUSMSpace::SYCLHostUSMSpace(sycl::queue queue)
+    : m_queue(std::move(queue)) {}
+
+void* allocate_sycl(
+    const char* arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size, const Kokkos::Tools::SpaceHandle arg_handle,
+    const RawMemoryAllocationFailure::AllocationMechanism failure_tag,
+    const sycl::usm::alloc allocation_kind, const sycl::queue& queue) {
+  void* const hostPtr = sycl::malloc(arg_alloc_size, queue, allocation_kind);
+
+  if (hostPtr == nullptr)
+    throw RawMemoryAllocationFailure(
+        arg_alloc_size, 1, RawMemoryAllocationFailure::FailureMode::Unknown,
+        failure_tag);
+
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::allocateData(arg_handle, arg_label, hostPtr,
+                                    reported_size);
+  }
+
+  return hostPtr;
+}
+
+void* SYCLDeviceUSMSpace::allocate(const Kokkos::Experimental::SYCL& exec_space,
+                                   const size_t arg_alloc_size) const {
+  return allocate(exec_space, "[unlabeled]", arg_alloc_size);
+}
+
+void* SYCLDeviceUSMSpace::allocate(const Kokkos::Experimental::SYCL& exec_space,
+                                   const char* arg_label,
+                                   const size_t arg_alloc_size,
+                                   const size_t arg_logical_size) const {
+  return allocate_sycl(
+      arg_label, arg_alloc_size, arg_logical_size,
+      Kokkos::Tools::make_space_handle(name()),
+      RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocDevice,
+      sycl::usm::alloc::device,
+      *exec_space.impl_internal_space_instance()->m_queue);
+}
+
+void* SYCLDeviceUSMSpace::allocate(const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+
+void* SYCLDeviceUSMSpace::allocate(const char* arg_label,
+                                   const size_t arg_alloc_size,
+                                   const size_t arg_logical_size) const {
+  return allocate_sycl(
+      arg_label, arg_alloc_size, arg_logical_size,
+      Kokkos::Tools::make_space_handle(name()),
+      RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocDevice,
+      sycl::usm::alloc::device, m_queue);
+}
+
+void* SYCLSharedUSMSpace::allocate(const SYCL& exec_space,
+                                   const size_t arg_alloc_size) const {
+  return allocate(exec_space, "[unlabeled]", arg_alloc_size);
+}
+void* SYCLSharedUSMSpace::allocate(const SYCL& exec_space,
+                                   const char* arg_label,
+                                   const size_t arg_alloc_size,
+                                   const size_t arg_logical_size) const {
+  return allocate_sycl(
+      arg_label, arg_alloc_size, arg_logical_size,
+      Kokkos::Tools::make_space_handle(name()),
+      RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocShared,
+      sycl::usm::alloc::shared,
+      *exec_space.impl_internal_space_instance()->m_queue);
+}
+
+void* SYCLSharedUSMSpace::allocate(const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+void* SYCLSharedUSMSpace::allocate(const char* arg_label,
+                                   const size_t arg_alloc_size,
+                                   const size_t arg_logical_size) const {
+  return allocate_sycl(
+      arg_label, arg_alloc_size, arg_logical_size,
+      Kokkos::Tools::make_space_handle(name()),
+      RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocShared,
+      sycl::usm::alloc::shared, m_queue);
+}
+
+void* SYCLHostUSMSpace::allocate(const SYCL& exec_space,
+                                 const size_t arg_alloc_size) const {
+  return allocate(exec_space, "[unlabeled]", arg_alloc_size);
+}
+void* SYCLHostUSMSpace::allocate(const SYCL& exec_space, const char* arg_label,
+                                 const size_t arg_alloc_size,
+                                 const size_t arg_logical_size) const {
+  return allocate_sycl(
+      arg_label, arg_alloc_size, arg_logical_size,
+      Kokkos::Tools::make_space_handle(name()),
+      RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocHost,
+      sycl::usm::alloc::host,
+      *exec_space.impl_internal_space_instance()->m_queue);
+}
+
+void* SYCLHostUSMSpace::allocate(const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+void* SYCLHostUSMSpace::allocate(const char* arg_label,
+                                 const size_t arg_alloc_size,
+                                 const size_t arg_logical_size) const {
+  return allocate_sycl(
+      arg_label, arg_alloc_size, arg_logical_size,
+      Kokkos::Tools::make_space_handle(name()),
+      RawMemoryAllocationFailure::AllocationMechanism::SYCLMallocHost,
+      sycl::usm::alloc::host, m_queue);
+}
+
+void sycl_deallocate(const char* arg_label, void* const arg_alloc_ptr,
+                     const size_t arg_alloc_size, const size_t arg_logical_size,
+                     const Kokkos::Tools::SpaceHandle arg_handle,
+                     const sycl::queue& queue) {
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+                                      reported_size);
+  }
+
+  SYCL::impl_static_fence(
+      "Kokkos::Impl::sycl_deallocate: fence before deallocate");
+  sycl::free(arg_alloc_ptr, queue);
+}
+
+void SYCLDeviceUSMSpace::deallocate(void* const arg_alloc_ptr,
+                                    const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+void SYCLDeviceUSMSpace::deallocate(const char* arg_label,
+                                    void* const arg_alloc_ptr,
+                                    const size_t arg_alloc_size,
+                                    const size_t arg_logical_size) const {
+  sycl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size,
+                  Kokkos::Tools::make_space_handle(name()), m_queue);
+}
+
+void SYCLSharedUSMSpace::deallocate(void* const arg_alloc_ptr,
+                                    const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void SYCLSharedUSMSpace::deallocate(const char* arg_label,
+                                    void* const arg_alloc_ptr,
+                                    const size_t arg_alloc_size,
+                                    const size_t arg_logical_size) const {
+  sycl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size,
+                  Kokkos::Tools::make_space_handle(name()), m_queue);
+}
+
+void SYCLHostUSMSpace::deallocate(void* const arg_alloc_ptr,
+                                  const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void SYCLHostUSMSpace::deallocate(const char* arg_label,
+                                  void* const arg_alloc_ptr,
+                                  const size_t arg_alloc_size,
+                                  const size_t arg_logical_size) const {
+  sycl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size,
+                  Kokkos::Tools::make_space_handle(name()), m_queue);
+}
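+
+// Usage sketch (illustrative only; the label and sizes are placeholders):
+// the unlabeled overloads above simply forward to the labeled ones, e.g.
+//   Kokkos::Experimental::SYCLDeviceUSMSpace space;
+//   void* p = space.allocate("my_label", 1024);
+//   space.deallocate("my_label", p, 1024);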
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+#ifdef KOKKOS_ENABLE_DEBUG
+SharedAllocationRecord<void, void> SharedAllocationRecord<
+    Kokkos::Experimental::SYCLDeviceUSMSpace, void>::s_root_record;
+
+SharedAllocationRecord<void, void> SharedAllocationRecord<
+    Kokkos::Experimental::SYCLSharedUSMSpace, void>::s_root_record;
+
+SharedAllocationRecord<void, void> SharedAllocationRecord<
+    Kokkos::Experimental::SYCLHostUSMSpace, void>::s_root_record;
+#endif
+
+SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::SYCLDeviceUSMSpace& space,
+        const std::string& label, const size_t size,
+        const SharedAllocationRecord<void, void>::function_type dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                                  void>::s_root_record,
+#endif
+          Kokkos::Impl::checked_allocation_with_header(space, label, size),
+          sizeof(SharedAllocationHeader) + size, dealloc, label),
+      m_space(space) {
+  SharedAllocationHeader header;
+
+  this->base_t::_fill_host_accessible_header_info(header, label);
+
+  // Copy to device memory
+  Kokkos::Experimental::SYCL exec;
+  Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace, HostSpace>(
+      exec, RecordBase::m_alloc_ptr, &header, sizeof(SharedAllocationHeader));
+  exec.fence(
+      "SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace, "
+      "void>::SharedAllocationRecord(): fence after copying header from "
+      "HostSpace");
+}
+
+SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::SYCL& arg_exec_space,
+        const Kokkos::Experimental::SYCLDeviceUSMSpace& space,
+        const std::string& label, const size_t size,
+        const SharedAllocationRecord<void, void>::function_type dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                                  void>::s_root_record,
+#endif
+          Kokkos::Impl::checked_allocation_with_header(arg_exec_space, space,
+                                                       label, size),
+          sizeof(SharedAllocationHeader) + size, dealloc, label),
+      m_space(space) {
+  SharedAllocationHeader header;
+
+  this->base_t::_fill_host_accessible_header_info(header, label);
+
+  // Copy to device memory
+  Kokkos::Impl::DeepCopy<Kokkos::Experimental::SYCLDeviceUSMSpace, HostSpace>(
+      arg_exec_space, RecordBase::m_alloc_ptr, &header,
+      sizeof(SharedAllocationHeader));
+}
+
+SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::SYCL& exec_space,
+        const Kokkos::Experimental::SYCLSharedUSMSpace& arg_space,
+        const std::string& arg_label, const size_t arg_alloc_size,
+        const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace,
+                                  void>::s_root_record,
+#endif
+          Impl::checked_allocation_with_header(exec_space, arg_space, arg_label,
+                                               arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+
+  this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
+                                                  arg_label);
+}
+
+SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::SYCLSharedUSMSpace& arg_space,
+        const std::string& arg_label, const size_t arg_alloc_size,
+        const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace,
+                                  void>::s_root_record,
+#endif
+          Impl::checked_allocation_with_header(arg_space, arg_label,
+                                               arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+
+  this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
+                                                  arg_label);
+}
+
+SharedAllocationRecord<Kokkos::Experimental::SYCLHostUSMSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::SYCL& exec_space,
+        const Kokkos::Experimental::SYCLHostUSMSpace& arg_space,
+        const std::string& arg_label, const size_t arg_alloc_size,
+        const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::SYCLHostUSMSpace,
+                                  void>::s_root_record,
+#endif
+          Impl::checked_allocation_with_header(exec_space, arg_space, arg_label,
+                                               arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+
+  this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
+                                                  arg_label);
+}
+
+SharedAllocationRecord<Kokkos::Experimental::SYCLHostUSMSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::SYCLHostUSMSpace& arg_space,
+        const std::string& arg_label, const size_t arg_alloc_size,
+        const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::SYCLHostUSMSpace,
+                                  void>::s_root_record,
+#endif
+          Impl::checked_allocation_with_header(arg_space, arg_label,
+                                               arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+
+  this->base_t::_fill_host_accessible_header_info(*base_t::m_alloc_ptr,
+                                                  arg_label);
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+SharedAllocationRecord<Kokkos::Experimental::SYCLDeviceUSMSpace,
+                       void>::~SharedAllocationRecord() {
+  const auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     alloc_size, alloc_size - sizeof(SharedAllocationHeader));
+}
+
+SharedAllocationRecord<Kokkos::Experimental::SYCLSharedUSMSpace,
+                       void>::~SharedAllocationRecord() {
+  const auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     alloc_size, alloc_size - sizeof(SharedAllocationHeader));
+}
+
+SharedAllocationRecord<Kokkos::Experimental::SYCLHostUSMSpace,
+                       void>::~SharedAllocationRecord() {
+  const auto alloc_size = SharedAllocationRecord<void, void>::m_alloc_size;
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     alloc_size, alloc_size - sizeof(SharedAllocationHeader));
+}
+
+//----------------------------------------------------------------------------
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//==============================================================================
+// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
+
+#include <impl/Kokkos_SharedAlloc_timpl.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// To avoid additional compilation cost for something that's (mostly?) not
+// performance sensitive, we explicitly instantiate these CRTP base classes
+// here, where we have access to the associated *_timpl.hpp header files.
+template class HostInaccessibleSharedAllocationRecordCommon<
+    Kokkos::Experimental::SYCLDeviceUSMSpace>;
+template class SharedAllocationRecordCommon<
+    Kokkos::Experimental::SYCLDeviceUSMSpace>;
+template class SharedAllocationRecordCommon<
+    Kokkos::Experimental::SYCLSharedUSMSpace>;
+template class SharedAllocationRecordCommon<
+    Kokkos::Experimental::SYCLHostUSMSpace>;
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
+//==============================================================================
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Team.hpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_Team.hpp
new file mode 100644 (file)
index 0000000..a8c6041
--- /dev/null
@@ -0,0 +1,919 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SYCL_TEAM_HPP
+#define KOKKOS_SYCL_TEAM_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#ifdef KOKKOS_ENABLE_SYCL
+
+#include <utility>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+/**\brief  Team member_type passed to TeamPolicy or TeamTask closures.
+ */
+class SYCLTeamMember {
+ public:
+  using execution_space      = Kokkos::Experimental::SYCL;
+  using scratch_memory_space = execution_space::scratch_memory_space;
+
+ private:
+  mutable sycl::local_ptr<void> m_team_reduce;
+  scratch_memory_space m_team_shared;
+  int m_team_reduce_size;
+  sycl::nd_item<2> m_item;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& team_shmem() const {
+    return m_team_shared.set_team_thread_mode(0, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& team_scratch(
+      const int level) const {
+    return m_team_shared.set_team_thread_mode(level, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& thread_scratch(
+      const int level) const {
+    return m_team_shared.set_team_thread_mode(level, team_size(), team_rank());
+  }
+
+  KOKKOS_INLINE_FUNCTION int league_rank() const {
+    return m_item.get_group_linear_id();
+  }
+  KOKKOS_INLINE_FUNCTION int league_size() const {
+    return m_item.get_group_range(1);
+  }
+  KOKKOS_INLINE_FUNCTION int team_rank() const {
+    return m_item.get_local_id(0);
+  }
+  KOKKOS_INLINE_FUNCTION int team_size() const {
+    return m_item.get_local_range(0);
+  }
+  KOKKOS_INLINE_FUNCTION void team_barrier() const {
+    sycl::group_barrier(m_item.get_group());
+  }
+
+  KOKKOS_INLINE_FUNCTION const sycl::nd_item<2>& item() const { return m_item; }
+
+  //--------------------------------------------------------------------------
+
+  template <class ValueType>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<std::is_trivially_copyable_v<ValueType>>
+      team_broadcast(ValueType& val, const int thread_id) const {
+    val = sycl::group_broadcast(m_item.get_group(), val,
+                                sycl::id<2>(thread_id, 0));
+  }
+
+  // FIXME_SYCL remove/adapt this overload once the Intel oneAPI implementation
+  // is conforming to the SYCL2020 standard (allowing trivially-copyable types)
+  template <class ValueType>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!std::is_trivially_copyable_v<ValueType>>
+      team_broadcast(ValueType& val, const int thread_id) const {
+    // Wait for shared data write until all threads arrive here
+    sycl::group_barrier(m_item.get_group());
+    if (m_item.get_local_id(1) == 0 &&
+        static_cast<int>(m_item.get_local_id(0)) == thread_id) {
+      *static_cast<sycl::local_ptr<ValueType>>(m_team_reduce) = val;
+    }
+    // Wait for shared data read until root thread writes
+    sycl::group_barrier(m_item.get_group());
+    val = *static_cast<sycl::local_ptr<ValueType>>(m_team_reduce);
+  }
+
+  template <class Closure, class ValueType>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(Closure const& f, ValueType& val,
+                                             const int thread_id) const {
+    f(val);
+    team_broadcast(val, thread_id);
+  }
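+
+  // Usage sketch (illustrative; compute() is a placeholder): broadcast a
+  // value computed by team rank 0 to every thread of the team:
+  //   int v = 0;
+  //   if (member.team_rank() == 0) v = compute();
+  //   member.team_broadcast(v, 0);  // all threads now hold rank 0's v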
+
+  //--------------------------------------------------------------------------
+  /**\brief  Reduction across a team
+   */
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+  team_reduce(ReducerType const& reducer) const noexcept {
+    team_reduce(reducer, reducer.reference());
+  }
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+  team_reduce(ReducerType const& reducer,
+              typename ReducerType::value_type& value) const noexcept {
+    using value_type = typename ReducerType::value_type;
+
+    auto sg                       = m_item.get_sub_group();
+    const auto sub_group_range    = sg.get_local_range()[0];
+    const auto vector_range       = m_item.get_local_range(1);
+    const unsigned int team_size_ = team_size();
+    const unsigned int team_rank_ = team_rank();
+
+    // First combine the values in the same subgroup
+    for (unsigned int shift = 1; vector_range * shift < sub_group_range;
+         shift <<= 1) {
+      const value_type tmp = sg.shuffle_down(value, vector_range * shift);
+      if (team_rank_ + shift < team_size_) reducer.join(value, tmp);
+    }
+    value = sg.shuffle(value, 0);
+
+    // We need to chunk up the whole reduction because we might not have
+    // allocated enough memory.
+    const auto n_subgroups = sg.get_group_range()[0];
+    const unsigned int maximum_work_range =
+        std::min<int>(m_team_reduce_size / sizeof(value_type), n_subgroups);
+
+    const auto id_in_sg = sg.get_local_id()[0];
+    auto reduction_array =
+        static_cast<sycl::local_ptr<value_type>>(m_team_reduce);
+
+    // Load values into the first maximum_work_range entries of the reduction
+    // array in chunks. This means that only subgroups with an id in the
+    // corresponding chunk load values.
+    const auto group_id = sg.get_group_id()[0];
+    if (id_in_sg == 0 && group_id < maximum_work_range)
+      reduction_array[group_id] = value;
+    sycl::group_barrier(m_item.get_group());
+
+    for (unsigned int start = maximum_work_range; start < n_subgroups;
+         start += maximum_work_range) {
+      if (id_in_sg == 0 && group_id >= start &&
+          group_id <
+              std::min<unsigned int>(start + maximum_work_range, n_subgroups))
+        reducer.join(reduction_array[group_id - start], value);
+      sycl::group_barrier(m_item.get_group());
+    }
+
+    // Let the first subgroup do the final reduction
+    if (group_id == 0) {
+      const auto local_range = sg.get_local_range()[0];
+      auto result =
+          reduction_array[id_in_sg < maximum_work_range ? id_in_sg : 0];
+      // In case the maximum_work_range is larger than the range of the first
+      // subgroup, we first combine the items with a higher index.
+      for (unsigned int offset = local_range; offset < maximum_work_range;
+           offset += local_range)
+        if (id_in_sg + offset < maximum_work_range)
+          reducer.join(result, reduction_array[id_in_sg + offset]);
+      sycl::group_barrier(sg);
+
+      // Now do the actual subgroup reduction.
+      const auto min_range =
+          std::min<unsigned int>(maximum_work_range, local_range);
+      for (unsigned int stride = 1; stride < min_range; stride <<= 1) {
+        const auto tmp = sg.shuffle_down(result, stride);
+        if (id_in_sg + stride < min_range) reducer.join(result, tmp);
+      }
+      if (id_in_sg == 0) reduction_array[0] = result;
+    }
+    sycl::group_barrier(m_item.get_group());
+
+    reducer.reference() = reduction_array[0];
+    // Make sure that the reduction array hasn't been modified in the meantime.
+    m_item.barrier(sycl::access::fence_space::local_space);
+  }
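+
+  // Usage sketch (illustrative; contribution is a placeholder): combine one
+  // value per thread into a team-wide result visible on every thread:
+  //   double contribution = ...;
+  //   member.team_reduce(Kokkos::Sum<double>(contribution));
+  //   // contribution now holds the team-wide sum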
+
+  //--------------------------------------------------------------------------
+  /** \brief  Intra-team exclusive prefix sum with team_rank() ordering
+   *          with intra-team non-deterministic ordering accumulation.
+   *
+   *  The global inter-team accumulation value will, at the end of the
+   *  league's parallel execution, be the scan's total.
+   *  Parallel execution ordering of the league's teams is non-deterministic.
+   *  As such the base value for each team's scan operation is similarly
+   *  non-deterministic.
+   */
+  template <typename Type>
+  KOKKOS_INLINE_FUNCTION Type team_scan(const Type& input_value,
+                                        Type* const global_accum) const {
+    Type value                 = input_value;
+    auto sg                    = m_item.get_sub_group();
+    const auto sub_group_range = sg.get_local_range()[0];
+    const auto vector_range    = m_item.get_local_range(1);
+    const auto id_in_sg        = sg.get_local_id()[0];
+
+    // First combine the values in the same subgroup
+    for (unsigned int stride = 1; vector_range * stride < sub_group_range;
+         stride <<= 1) {
+      auto tmp = sg.shuffle_up(value, vector_range * stride);
+      if (id_in_sg >= vector_range * stride) value += tmp;
+    }
+
+    const auto n_active_subgroups = sg.get_group_range()[0];
+    const auto base_data =
+        static_cast<sycl::local_ptr<Type>>(m_team_reduce).get();
+    if (static_cast<int>(n_active_subgroups * sizeof(Type)) >
+        m_team_reduce_size)
+      Kokkos::abort("Not implemented!");
+
+    const auto group_id = sg.get_group_id()[0];
+    if (id_in_sg == sub_group_range - 1) base_data[group_id] = value;
+    sycl::group_barrier(m_item.get_group());
+
+    // scan subgroup results using the first subgroup
+    if (n_active_subgroups > 1) {
+      if (group_id == 0) {
+        const auto n_rounds =
+            (n_active_subgroups + sub_group_range - 1) / sub_group_range;
+        for (unsigned int round = 0; round < n_rounds; ++round) {
+          const auto idx         = id_in_sg + round * sub_group_range;
+          const auto upper_bound = std::min(
+              sub_group_range, n_active_subgroups - round * sub_group_range);
+          auto local_value = base_data[idx];
+          for (unsigned int stride = 1; stride < upper_bound; stride <<= 1) {
+            auto tmp = sg.shuffle_up(local_value, stride);
+            if (id_in_sg >= stride) {
+              if (idx < n_active_subgroups)
+                local_value += tmp;
+              else
+                local_value = tmp;
+            }
+          }
+          base_data[idx] = local_value;
+          if (round > 0)
+            base_data[idx] += base_data[round * sub_group_range - 1];
+          if (round + 1 < n_rounds) sycl::group_barrier(sg);
+        }
+      }
+      sycl::group_barrier(m_item.get_group());
+    }
+    auto total = base_data[n_active_subgroups - 1];
+
+    const auto update = sg.shuffle_up(value, vector_range);
+    Type intermediate = (group_id > 0 ? base_data[group_id - 1] : 0) +
+                        (id_in_sg >= vector_range ? update : 0);
+
+    if (global_accum) {
+      if (id_in_sg == sub_group_range - 1 &&
+          group_id == n_active_subgroups - 1) {
+        base_data[n_active_subgroups - 1] =
+            atomic_fetch_add(global_accum, total);
+      }
+      sycl::group_barrier(m_item.get_group());  // Wait for atomic
+      intermediate += base_data[n_active_subgroups - 1];
+    }
+    // Make sure that the reduction array hasn't been modified in the meantime.
+    m_item.barrier(sycl::access::fence_space::local_space);
+
+    return intermediate;
+  }
+
+  /** \brief  Intra-team exclusive prefix sum with team_rank() ordering.
+   *
+   *  The highest rank thread can compute the reduction total as
+   *    reduction_total = dev.team_scan( value ) + value ;
+   */
+  template <typename Type>
+  KOKKOS_INLINE_FUNCTION Type team_scan(const Type& value) const {
+    return this->template team_scan<Type>(value, nullptr);
+  }
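+
+  // Usage sketch (illustrative; my_count is a placeholder): an exclusive
+  // prefix sum over the team, with the total recovered as documented above:
+  //   const int offset = member.team_scan(my_count);
+  //   const int total  = offset + my_count;  // valid on the highest rank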
+
+  //----------------------------------------
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+  vector_reduce(ReducerType const& reducer) const {
+    vector_reduce(reducer, reducer.reference());
+  }
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+  vector_reduce(ReducerType const& reducer,
+                typename ReducerType::value_type& value) const {
+    const auto tidx1   = m_item.get_local_id(1);
+    const auto grange1 = m_item.get_local_range(1);
+
+    const auto sg = m_item.get_sub_group();
+
+    if (grange1 == 1) return;
+
+    // Intra vector lane shuffle reduction:
+    typename ReducerType::value_type tmp(value);
+    typename ReducerType::value_type tmp2 = tmp;
+
+    for (int i = grange1; (i >>= 1);) {
+      tmp2 = sg.shuffle_down(tmp, i);
+      if (static_cast<int>(tidx1) < i) {
+        reducer.join(tmp, tmp2);
+      }
+    }
+
+    // Broadcast from root lane to all other lanes.
+    // Cannot use "butterfly" algorithm to avoid the broadcast
+    // because floating point summation is not associative
+    // and thus different threads could have different results.
+
+    tmp2  = sg.shuffle(tmp, (sg.get_local_id() / grange1) * grange1);
+    value = tmp2;
+    reducer.reference() = tmp2;
+  }
+
+  //----------------------------------------
+  // Private for the driver
+
+  KOKKOS_INLINE_FUNCTION
+  SYCLTeamMember(sycl::local_ptr<void> shared, const int shared_begin,
+                 const int shared_size,
+                 sycl::device_ptr<void> scratch_level_1_ptr,
+                 const int scratch_level_1_size, const sycl::nd_item<2> item)
+      : m_team_reduce(shared),
+        m_team_shared(static_cast<sycl::local_ptr<char>>(shared) + shared_begin,
+                      shared_size, scratch_level_1_ptr, scratch_level_1_size),
+        m_team_reduce_size(shared_begin),
+        m_item(item) {}
+
+ public:
+  // Declare this to avoid unused-private-member warnings, which are triggered
+  // when SFINAE excludes the member functions that use these variables.
+  // Making another class a friend also suppresses these warnings.
+  bool impl_avoid_sfinae_warning() const noexcept {
+    return m_team_reduce_size > 0 && m_team_reduce != nullptr;
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename iType>
+struct TeamThreadRangeBoundariesStruct<iType, SYCLTeamMember> {
+  using index_type = iType;
+  const SYCLTeamMember& member;
+  const iType start;
+  const iType end;
+
+  KOKKOS_INLINE_FUNCTION
+  TeamThreadRangeBoundariesStruct(const SYCLTeamMember& thread_, iType count)
+      : member(thread_), start(0), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  TeamThreadRangeBoundariesStruct(const SYCLTeamMember& thread_, iType begin_,
+                                  iType end_)
+      : member(thread_), start(begin_), end(end_) {}
+};
+
+template <typename iType>
+struct TeamVectorRangeBoundariesStruct<iType, SYCLTeamMember> {
+  using index_type = iType;
+  const SYCLTeamMember& member;
+  const iType start;
+  const iType end;
+
+  KOKKOS_INLINE_FUNCTION
+  TeamVectorRangeBoundariesStruct(const SYCLTeamMember& thread_,
+                                  const iType& count)
+      : member(thread_), start(0), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  TeamVectorRangeBoundariesStruct(const SYCLTeamMember& thread_,
+                                  const iType& begin_, const iType& end_)
+      : member(thread_), start(begin_), end(end_) {}
+};
+
+template <typename iType>
+struct ThreadVectorRangeBoundariesStruct<iType, SYCLTeamMember> {
+  using index_type = iType;
+  const SYCLTeamMember& member;
+  const index_type start;
+  const index_type end;
+
+  KOKKOS_INLINE_FUNCTION
+  ThreadVectorRangeBoundariesStruct(const SYCLTeamMember& thread,
+                                    index_type count)
+      : member(thread), start(static_cast<index_type>(0)), end(count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  ThreadVectorRangeBoundariesStruct(const SYCLTeamMember& thread,
+                                    index_type arg_begin, index_type arg_end)
+      : member(thread), start(arg_begin), end(arg_end) {}
+};
+
+}  // namespace Impl
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::TeamThreadRangeBoundariesStruct<iType, Impl::SYCLTeamMember>
+    TeamThreadRange(const Impl::SYCLTeamMember& thread, iType count) {
+  return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::SYCLTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::SYCLTeamMember>
+TeamThreadRange(const Impl::SYCLTeamMember& thread, iType1 begin, iType2 end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::TeamThreadRangeBoundariesStruct<iType, Impl::SYCLTeamMember>(
+      thread, iType(begin), iType(end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::TeamVectorRangeBoundariesStruct<iType, Impl::SYCLTeamMember>
+    TeamVectorRange(const Impl::SYCLTeamMember& thread, const iType& count) {
+  return Impl::TeamVectorRangeBoundariesStruct<iType, Impl::SYCLTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamVectorRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::SYCLTeamMember>
+TeamVectorRange(const Impl::SYCLTeamMember& thread, const iType1& begin,
+                const iType2& end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::TeamVectorRangeBoundariesStruct<iType, Impl::SYCLTeamMember>(
+      thread, iType(begin), iType(end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::SYCLTeamMember>
+    ThreadVectorRange(const Impl::SYCLTeamMember& thread, iType count) {
+  return Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::SYCLTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::SYCLTeamMember>
+ThreadVectorRange(const Impl::SYCLTeamMember& thread, iType1 arg_begin,
+                  iType2 arg_end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::SYCLTeamMember>(
+      thread, iType(arg_begin), iType(arg_end));
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::ThreadSingleStruct<Impl::SYCLTeamMember> PerTeam(
+    const Impl::SYCLTeamMember& thread) {
+  return Impl::ThreadSingleStruct<Impl::SYCLTeamMember>(thread);
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::VectorSingleStruct<Impl::SYCLTeamMember> PerThread(
+    const Impl::SYCLTeamMember& thread) {
+  return Impl::VectorSingleStruct<Impl::SYCLTeamMember>(thread);
+}
+
+//----------------------------------------------------------------------------
+
+/** \brief  Inter-thread parallel_for.
+ *
+ *  Executes closure(iType i) for each i=[0..N).
+ *
+ * The range [0..N) is mapped to all threads of the calling thread team.
+ */
+template <typename iType, class Closure>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::SYCLTeamMember>&
+        loop_boundaries,
+    const Closure& closure) {
+  for (iType i = loop_boundaries.start +
+                 loop_boundaries.member.item().get_local_id(0);
+       i < loop_boundaries.end;
+       i += loop_boundaries.member.item().get_local_range(0))
+    closure(i);
+}
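+
+// Usage sketch (illustrative; member, n, and a are placeholders for a team
+// handle, an extent, and a view):
+//   Kokkos::parallel_for(Kokkos::TeamThreadRange(member, n),
+//                        [&](const int i) { a(i) += 1.0; });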
+
+//----------------------------------------------------------------------------
+
+/** \brief  Inter-thread parallel_reduce with a reducer.
+ *
+ *  Executes closure(iType i, ValueType & val) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all threads of the
+ *  calling thread team and a summation of val is
+ *  performed and put into result.
+ */
+template <typename iType, class Closure, class ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+                    iType, Impl::SYCLTeamMember>& loop_boundaries,
+                const Closure& closure, const ReducerType& reducer) {
+  typename ReducerType::value_type value;
+  reducer.init(value);
+
+  for (iType i = loop_boundaries.start +
+                 loop_boundaries.member.item().get_local_id(0);
+       i < loop_boundaries.end;
+       i += loop_boundaries.member.item().get_local_range(0)) {
+    closure(i, value);
+  }
+
+  loop_boundaries.member.team_reduce(reducer, value);
+}
+
+/** \brief  Inter-thread parallel_reduce assuming summation.
+ *
+ *  Executes closure(iType i, ValueType & val) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all threads of the
+ *  calling thread team and a summation of val is
+ *  performed and put into result.
+ */
+template <typename iType, class Closure, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+                    iType, Impl::SYCLTeamMember>& loop_boundaries,
+                const Closure& closure, ValueType& result) {
+  ValueType val;
+  Kokkos::Sum<ValueType> reducer(val);
+
+  reducer.init(reducer.reference());
+
+  for (iType i = loop_boundaries.start +
+                 loop_boundaries.member.item().get_local_id(0);
+       i < loop_boundaries.end;
+       i += loop_boundaries.member.item().get_local_range(0)) {
+    closure(i, val);
+  }
+
+  loop_boundaries.member.team_reduce(reducer, val);
+  result = reducer.reference();
+}
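+
+// Usage sketch (illustrative; member, n, and a are placeholders):
+//   double sum = 0.;
+//   Kokkos::parallel_reduce(Kokkos::TeamThreadRange(member, n),
+//                           [&](const int i, double& lsum) { lsum += a(i); },
+//                           sum);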
+
+/** \brief  Inter-thread parallel exclusive prefix sum.
+ *
+ *  Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to each rank in the team (whose global rank is
+ *  less than N) and a scan operation is performed. The last call to closure has
+ *  final == true.
+ */
+// This is the same code as in CUDA and largely the same as in OpenMPTarget
+template <typename iType, typename FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::TeamThreadRangeBoundariesStruct<iType, Impl::SYCLTeamMember>&
+        loop_bounds,
+    const FunctorType& lambda) {
+  // Extract value_type from lambda
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Kokkos::Impl::FunctorPatternInterface::SCAN, void,
+      FunctorType>::value_type;
+
+  const auto start     = loop_bounds.start;
+  const auto end       = loop_bounds.end;
+  auto& member         = loop_bounds.member;
+  const auto team_size = member.team_size();
+  const auto team_rank = member.team_rank();
+  const auto nchunk    = (end - start + team_size - 1) / team_size;
+  value_type accum     = 0;
+  // each team has to process one or more chunks of the prefix scan
+  for (iType i = 0; i < nchunk; ++i) {
+    auto ii = start + i * team_size + team_rank;
+    // local accumulation for this chunk
+    value_type local_accum = 0;
+    // user updates value with prefix value
+    if (ii < loop_bounds.end) lambda(ii, local_accum, false);
+    // perform team scan
+    local_accum = member.team_scan(local_accum);
+    // add this block's accum to the total accumulation
+    auto val = accum + local_accum;
+    // user updates their data with total accumulation
+    if (ii < loop_bounds.end) lambda(ii, val, true);
+    // the last value needs to be propagated to the next chunk
+    if (team_rank == team_size - 1) accum = val;
+    // broadcast last value to rest of the team
+    member.team_broadcast(accum, team_size - 1);
+  }
+}
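+
+// Usage sketch (illustrative; member, n, in, and out are placeholders):
+// write the exclusive prefix sum of in into out:
+//   Kokkos::parallel_scan(Kokkos::TeamThreadRange(member, n),
+//                         [&](const int i, int& partial, const bool final) {
+//                           if (final) out(i) = partial;
+//                           partial += in(i);
+//                         });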
+
+template <typename iType, class Closure>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::TeamVectorRangeBoundariesStruct<iType, Impl::SYCLTeamMember>&
+        loop_boundaries,
+    const Closure& closure) {
+  const iType tidx0 = loop_boundaries.member.item().get_local_id(0);
+  const iType tidx1 = loop_boundaries.member.item().get_local_id(1);
+
+  const iType grange0 = loop_boundaries.member.item().get_local_range(0);
+  const iType grange1 = loop_boundaries.member.item().get_local_range(1);
+
+  for (iType i = loop_boundaries.start + tidx0 * grange1 + tidx1;
+       i < loop_boundaries.end; i += grange0 * grange1)
+    closure(i);
+}
+
+template <typename iType, class Closure, class ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
+                    iType, Impl::SYCLTeamMember>& loop_boundaries,
+                const Closure& closure, const ReducerType& reducer) {
+  typename ReducerType::value_type value;
+  reducer.init(value);
+
+  const iType tidx0 = loop_boundaries.member.item().get_local_id(0);
+  const iType tidx1 = loop_boundaries.member.item().get_local_id(1);
+
+  const iType grange0 = loop_boundaries.member.item().get_local_range(0);
+  const iType grange1 = loop_boundaries.member.item().get_local_range(1);
+
+  for (iType i = loop_boundaries.start + tidx0 * grange1 + tidx1;
+       i < loop_boundaries.end; i += grange0 * grange1)
+    closure(i, value);
+
+  loop_boundaries.member.vector_reduce(reducer, value);
+  loop_boundaries.member.team_reduce(reducer, value);
+}
+
+template <typename iType, class Closure, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>
+parallel_reduce(const Impl::TeamVectorRangeBoundariesStruct<
+                    iType, Impl::SYCLTeamMember>& loop_boundaries,
+                const Closure& closure, ValueType& result) {
+  ValueType val;
+  Kokkos::Sum<ValueType> reducer(val);
+
+  reducer.init(reducer.reference());
+
+  const iType tidx0 = loop_boundaries.member.item().get_local_id(0);
+  const iType tidx1 = loop_boundaries.member.item().get_local_id(1);
+
+  const iType grange0 = loop_boundaries.member.item().get_local_range(0);
+  const iType grange1 = loop_boundaries.member.item().get_local_range(1);
+
+  for (iType i = loop_boundaries.start + tidx0 * grange1 + tidx1;
+       i < loop_boundaries.end; i += grange0 * grange1)
+    closure(i, val);
+
+  loop_boundaries.member.vector_reduce(reducer);
+  loop_boundaries.member.team_reduce(reducer);
+  result = reducer.reference();
+}
+
+//----------------------------------------------------------------------------
+
+/** \brief  Intra-thread vector parallel_for.
+ *
+ *  Executes closure(iType i) for each i=[0..N)
+ *
+ * The range [0..N) is mapped to all vector lanes of the calling thread.
+ */
+template <typename iType, class Closure>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::SYCLTeamMember>&
+        loop_boundaries,
+    const Closure& closure) {
+  const iType tidx1   = loop_boundaries.member.item().get_local_id(1);
+  const iType grange1 = loop_boundaries.member.item().get_local_range(1);
+
+  for (iType i = loop_boundaries.start + tidx1; i < loop_boundaries.end;
+       i += grange1)
+    closure(i);
+
+  // FIXME_SYCL We only should fence active threads here but this not yet
+  // available in the compiler. We need https://github.com/intel/llvm/pull/4904
+  // or https://github.com/intel/llvm/pull/4903 for that. The current
+  // implementation leads to a deadlock only for SYCL+CUDA if not all threads in
+  // a subgroup see this barrier. For SYCL on Intel GPUs, the subgroup barrier
+  // is essentially a no-op (only a memory fence), though.
+  sycl::group_barrier(loop_boundaries.member.item().get_sub_group());
+}
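+
+// Usage sketch (illustrative; names are placeholders): a ThreadVectorRange
+// loop is typically nested inside a TeamThreadRange loop:
+//   Kokkos::parallel_for(Kokkos::TeamThreadRange(member, m), [&](const int i) {
+//     Kokkos::parallel_for(Kokkos::ThreadVectorRange(member, n),
+//                          [&](const int j) { a(i, j) = 0.0; });
+//   });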
+
+//----------------------------------------------------------------------------
+
+/** \brief  Intra-thread vector parallel_reduce.
+ *
+ *  Calls closure(iType i, ValueType & val) for each i=[0..N).
+ *
+ *  The range [0..N) is mapped to all vector lanes of
+ *  the calling thread and a reduction of val is performed using +=
+ *  and output into result.
+ *
+ *  The identity value for the += operator is assumed to be the default
+ *  constructed value.
+ */
+template <typename iType, class Closure, class ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+parallel_reduce(Impl::ThreadVectorRangeBoundariesStruct<
+                    iType, Impl::SYCLTeamMember> const& loop_boundaries,
+                Closure const& closure, ReducerType const& reducer) {
+  reducer.init(reducer.reference());
+
+  const iType tidx1   = loop_boundaries.member.item().get_local_id(1);
+  const iType grange1 = loop_boundaries.member.item().get_local_range(1);
+
+  for (iType i = loop_boundaries.start + tidx1; i < loop_boundaries.end;
+       i += grange1)
+    closure(i, reducer.reference());
+
+  loop_boundaries.member.vector_reduce(reducer);
+}
+
+/** \brief  Intra-thread vector parallel_reduce.
+ *
+ *  Calls closure(iType i, ValueType & val) for each i=[0..N).
+ *
+ *  The range [0..N) is mapped to all vector lanes of
+ *  the calling thread and a reduction of val is performed using +=
+ *  and output into result.
+ *
+ *  The identity value for the += operator is assumed to be the default
+ *  constructed value.
+ */
+template <typename iType, class Closure, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!is_reducer<ValueType>::value>
+parallel_reduce(Impl::ThreadVectorRangeBoundariesStruct<
+                    iType, Impl::SYCLTeamMember> const& loop_boundaries,
+                Closure const& closure, ValueType& result) {
+  result = ValueType();
+
+  const iType tidx1 = loop_boundaries.member.item().get_local_id(1);
+  const int grange1 = loop_boundaries.member.item().get_local_range(1);
+
+  for (iType i = loop_boundaries.start + tidx1; i < loop_boundaries.end;
+       i += grange1)
+    closure(i, result);
+
+  loop_boundaries.member.vector_reduce(Kokkos::Sum<ValueType>(result));
+}
+
+//----------------------------------------------------------------------------
+
+/** \brief  Intra-thread vector parallel exclusive prefix sum with reducer.
+ *
+ *  Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all vector lanes in the
+ *  thread and a scan operation is performed.
+ *  The last call to closure has final == true.
+ */
+template <typename iType, class Closure, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_scan(const Impl::ThreadVectorRangeBoundariesStruct<
+                  iType, Impl::SYCLTeamMember>& loop_boundaries,
+              const Closure& closure, const ReducerType& reducer) {
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+
+  value_type accum;
+  reducer.init(accum);
+  const value_type identity = accum;
+
+  // Loop through the boundaries in vector-length chunks; a scan must be
+  // performed at each iteration.
+
+  // All thread "lanes" must loop the same number of times.
+  // Determine a loop end for all thread "lanes."
+  // Requires:
+  //   grange1 is power of two and thus
+  //     ( end % grange1 ) == ( end & ( grange1 - 1 ) )
+  //   1 <= grange1 <= sub_group size
+
+  const iType tidx1   = loop_boundaries.member.item().get_local_id(1);
+  const iType grange1 = loop_boundaries.member.item().get_local_range(1);
+
+  const int mask          = grange1 - 1;
+  const int rem           = loop_boundaries.end & mask;  // == end % grange1
+  const int end           = loop_boundaries.end + (rem ? grange1 - rem : 0);
+  const auto sg           = loop_boundaries.member.item().get_sub_group();
+  const int vector_offset = (sg.get_local_id() / grange1) * grange1;
+
+  for (int i = tidx1; i < end; i += grange1) {
+    value_type val = identity;
+
+    // First acquire per-lane contributions.
+    // This sets i's val to i-1's contribution so that the subsequent
+    // shuffle_up yields an exclusive scan -- the final accumulation of i's
+    // own val will be included in the second closure call later.
+    if (i < loop_boundaries.end && tidx1 > 0) closure(i - 1, val, false);
+
+    // Bottom up exclusive scan in triangular pattern where each SYCL thread is
+    // the root of a reduction tree from the zeroth "lane" to itself.
+    //  [t] += [t-1] if t >= 1
+    //  [t] += [t-2] if t >= 2
+    //  [t] += [t-4] if t >= 4
+    //  ...
+    for (int j = 1; j < static_cast<int>(grange1); j <<= 1) {
+      value_type tmp = sg.shuffle_up(val, j);
+      if (j <= static_cast<int>(tidx1)) {
+        reducer.join(val, tmp);
+      }
+    }
+
+    // Include accumulation
+    reducer.join(val, accum);
+
+    // Update i's contribution into the val and add it to accum for next round
+    if (i < loop_boundaries.end) closure(i, val, true);
+    accum = sg.shuffle(val, mask + vector_offset);
+  }
+}
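+
+// Worked example of the triangular pattern above for grange1 == 4 and
+// per-lane values [a, b, c, d]: after j == 1 the lanes hold
+// [a, a+b, b+c, c+d]; after j == 2 they hold the inclusive scan
+// [a, a+b, a+b+c, a+b+c+d].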
+
+/** \brief  Intra-thread vector parallel exclusive prefix sum.
+ *
+ *  Executes closure(iType i, ValueType & val, bool final) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all vector lanes in the
+ *  thread and a scan operation is performed.
+ *  The last call to closure has final == true.
+ */
+template <typename iType, class Closure>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::SYCLTeamMember>&
+        loop_boundaries,
+    const Closure& closure) {
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+  value_type dummy;
+  parallel_scan(loop_boundaries, closure, Kokkos::Sum<value_type>{dummy});
+}
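+// Editor's note: a minimal usage sketch (not part of the original source),
+// assuming Views `counts` and `offsets` of length `n` inside a team lambda
+// where `team` is the team member handle. The exclusive-scan pattern stores
+// the prefix before accumulating:
+//
+//   Kokkos::parallel_scan(
+//       Kokkos::ThreadVectorRange(team, n),
+//       [=](int i, int& partial, bool final) {
+//         if (final) offsets(i) = partial;
+//         partial += counts(i);
+//       });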
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<Impl::SYCLTeamMember>& single_struct,
+    const FunctorType& lambda) {
+  if (single_struct.team_member.item().get_local_id(1) == 0) lambda();
+}
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::SYCLTeamMember>& single_struct,
+    const FunctorType& lambda) {
+  if (single_struct.team_member.item().get_local_linear_id() == 0) lambda();
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<Impl::SYCLTeamMember>& single_struct,
+    const FunctorType& lambda, ValueType& val) {
+  const sycl::nd_item<2> item = single_struct.team_member.item();
+  const auto grange1          = item.get_local_range(1);
+  const auto sg               = item.get_sub_group();
+  if (item.get_local_id(1) == 0) lambda(val);
+  val = sg.shuffle(val, (sg.get_local_id() / grange1) * grange1);
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::SYCLTeamMember>& single_struct,
+    const FunctorType& lambda, ValueType& val) {
+  if (single_struct.team_member.item().get_local_linear_id() == 0) lambda(val);
+  single_struct.team_member.team_broadcast(val, 0);
+}
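+// Editor's note: a minimal usage sketch (not part of the original source),
+// assuming a hypothetical device function `compute()` inside a team lambda.
+// The PerTeam form runs the lambda once per team and broadcasts the result;
+// the PerThread form runs it once per thread across its vector lanes:
+//
+//   int token;
+//   Kokkos::single(
+//       Kokkos::PerTeam(team), [=](int& v) { v = compute(); }, token);
+//   // every member of the team now sees the same value in `token`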
+
+}  // namespace Kokkos
+
+#endif
+
+#endif /* #ifndef KOKKOS_SYCL_TEAM_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_UniqueToken.hpp b/bundled/kokkos-3.7.00/core/src/SYCL/Kokkos_SYCL_UniqueToken.hpp
new file mode 100644 (file)
index 0000000..82bfae4
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SYCL_UNIQUE_TOKEN_HPP
+#define KOKKOS_SYCL_UNIQUE_TOKEN_HPP
+
+#include <impl/Kokkos_ConcurrentBitset.hpp>
+#include <Kokkos_SYCL_Space.hpp>
+#include <Kokkos_UniqueToken.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+namespace Impl {
+Kokkos::View<uint32_t*, SYCLDeviceUSMSpace> sycl_global_unique_token_locks(
+    bool deallocate = false);
+}
+
+// Both global and instance Unique Tokens are implemented in the same way:
+// the global version has one shared static lock array underneath,
+// but it can't be a static member variable since we need to access it on
+// device, and we share the implementation with the instance version.
+template <>
+class UniqueToken<SYCL, UniqueTokenScope::Global> {
+  Kokkos::View<uint32_t*, SYCLDeviceUSMSpace> m_locks;
+
+ public:
+  using execution_space = SYCL;
+  using size_type       = int32_t;
+
+  explicit UniqueToken(execution_space const& = execution_space())
+      : m_locks(Impl::sycl_global_unique_token_locks()) {}
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken(const UniqueToken&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken(UniqueToken&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken& operator=(const UniqueToken&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  UniqueToken& operator=(UniqueToken&&) = default;
+
+  /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  size_type size() const noexcept { return m_locks.extent(0); }
+
+ protected:
+  // Constructors for the Instance version
+  UniqueToken(size_type max_size)
+      : m_locks(Kokkos::View<uint32_t*, SYCLDeviceUSMSpace>(
+            "Kokkos::UniqueToken::m_locks", max_size)) {}
+
+  UniqueToken(size_type max_size, execution_space const& arg)
+      : m_locks(Kokkos::View<uint32_t*, SYCLDeviceUSMSpace>(
+            Kokkos::view_alloc(arg, "Kokkos::UniqueToken::m_locks"),
+            max_size)) {}
+
+ private:
+  /// \brief acquire value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  size_type impl_acquire() const {
+    auto item = sycl::ext::oneapi::experimental::this_nd_item<3>();
+    std::size_t threadIdx[3] = {item.get_local_id(2), item.get_local_id(1),
+                                item.get_local_id(0)};
+    std::size_t blockIdx[3]  = {item.get_group(2), item.get_group(1),
+                               item.get_group(0)};
+    std::size_t blockDim[3] = {item.get_local_range(2), item.get_local_range(1),
+                               item.get_local_range(0)};
+
+    int idx = blockIdx[0] * (blockDim[0] * blockDim[1]) +
+              threadIdx[1] * blockDim[0] + threadIdx[0];
+    idx %= size();
+
+    while (Kokkos::atomic_compare_exchange(&m_locks(idx), 0, 1) == 1) {
+      idx += blockDim[1] * blockDim[0] + 1;
+      idx %= size();
+    }
+
+    // Make sure that all writes by the previous lock owner are visible to me
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+    desul::atomic_thread_fence(desul::MemoryOrderAcquire(),
+                               desul::MemoryScopeDevice());
+#else
+    Kokkos::memory_fence();
+#endif
+    return idx;
+  }
+
+ public:
+  /// \brief acquire value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  size_type acquire() const {
+    KOKKOS_IF_ON_DEVICE(return impl_acquire();)
+    KOKKOS_IF_ON_HOST(return 0;)
+  }
+
+  /// \brief release an acquired value
+  KOKKOS_INLINE_FUNCTION
+  void release(size_type idx) const noexcept {
+    // Make sure my writes are visible to the next lock owner
+#ifdef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+    desul::atomic_thread_fence(desul::MemoryOrderRelease(),
+                               desul::MemoryScopeDevice());
+#else
+    Kokkos::memory_fence();
+#endif
+    (void)Kokkos::atomic_exchange(&m_locks(idx), 0);
+  }
+};
+
+template <>
+class UniqueToken<SYCL, UniqueTokenScope::Instance>
+    : public UniqueToken<SYCL, UniqueTokenScope::Global> {
+ public:
+  UniqueToken()
+      : UniqueToken<SYCL, UniqueTokenScope::Global>(
+            Kokkos::Experimental::SYCL().concurrency()) {}
+
+  explicit UniqueToken(execution_space const& arg)
+      : UniqueToken<SYCL, UniqueTokenScope::Global>(
+            Kokkos::Experimental::SYCL().concurrency(), arg) {}
+
+  explicit UniqueToken(size_type max_size)
+      : UniqueToken<SYCL, UniqueTokenScope::Global>(max_size) {}
+
+  UniqueToken(size_type max_size, execution_space const& arg)
+      : UniqueToken<SYCL, UniqueTokenScope::Global>(max_size, arg) {}
+};
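+// Editor's note: a minimal usage sketch (not part of the original source),
+// assuming Views `scratch` and `work`, where `scratch` has at least
+// token.size() entries. Each thread acquires a slot, uses it exclusively,
+// and releases it:
+//
+//   Kokkos::Experimental::UniqueToken<Kokkos::Experimental::SYCL> token;
+//   Kokkos::parallel_for(
+//       Kokkos::RangePolicy<Kokkos::Experimental::SYCL>(0, n),
+//       KOKKOS_LAMBDA(int i) {
+//         const int id = token.acquire();
+//         scratch(id) += work(i);  // no other thread holds slot `id`
+//         token.release(id);
+//       });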
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial.cpp b/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial.cpp
new file mode 100644 (file)
index 0000000..9205e82
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+
+#include <Kokkos_Serial.hpp>
+#include <impl/Kokkos_Traits.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+
+#include <cstdlib>
+#include <iostream>
+#include <sstream>
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+bool SerialInternal::is_initialized() { return m_is_initialized; }
+
+void SerialInternal::initialize() {
+  if (is_initialized()) return;
+
+  Impl::SharedAllocationRecord<void, void>::tracking_enable();
+
+  // Init the array of locks used for arbitrarily sized atomics
+  Impl::init_lock_array_host_space();
+
+  m_is_initialized = true;
+}
+
+void SerialInternal::finalize() {
+  if (m_thread_team_data.scratch_buffer()) {
+    m_thread_team_data.disband_team();
+    m_thread_team_data.disband_pool();
+
+    Kokkos::HostSpace space;
+
+    space.deallocate(m_thread_team_data.scratch_buffer(),
+                     m_thread_team_data.scratch_bytes());
+
+    m_thread_team_data.scratch_assign(nullptr, 0, 0, 0, 0, 0);
+  }
+
+  Kokkos::Profiling::finalize();
+
+  m_is_initialized = false;
+}
+
+SerialInternal& SerialInternal::singleton() {
+  static SerialInternal* self = nullptr;
+  if (!self) {
+    self = new SerialInternal();
+  }
+  return *self;
+}
+
+// Resize thread team data scratch memory
+void SerialInternal::resize_thread_team_data(size_t pool_reduce_bytes,
+                                             size_t team_reduce_bytes,
+                                             size_t team_shared_bytes,
+                                             size_t thread_local_bytes) {
+  if (pool_reduce_bytes < 512) pool_reduce_bytes = 512;
+  if (team_reduce_bytes < 512) team_reduce_bytes = 512;
+
+  const size_t old_pool_reduce  = m_thread_team_data.pool_reduce_bytes();
+  const size_t old_team_reduce  = m_thread_team_data.team_reduce_bytes();
+  const size_t old_team_shared  = m_thread_team_data.team_shared_bytes();
+  const size_t old_thread_local = m_thread_team_data.thread_local_bytes();
+  const size_t old_alloc_bytes  = m_thread_team_data.scratch_bytes();
+
+  // Allocate if any of the old allocations is too small:
+
+  const bool allocate = (old_pool_reduce < pool_reduce_bytes) ||
+                        (old_team_reduce < team_reduce_bytes) ||
+                        (old_team_shared < team_shared_bytes) ||
+                        (old_thread_local < thread_local_bytes);
+
+  if (allocate) {
+    Kokkos::HostSpace space;
+
+    if (old_alloc_bytes) {
+      m_thread_team_data.disband_team();
+      m_thread_team_data.disband_pool();
+
+      space.deallocate("Kokkos::Serial::scratch_mem",
+                       m_thread_team_data.scratch_buffer(),
+                       m_thread_team_data.scratch_bytes());
+    }
+
+    if (pool_reduce_bytes < old_pool_reduce) {
+      pool_reduce_bytes = old_pool_reduce;
+    }
+    if (team_reduce_bytes < old_team_reduce) {
+      team_reduce_bytes = old_team_reduce;
+    }
+    if (team_shared_bytes < old_team_shared) {
+      team_shared_bytes = old_team_shared;
+    }
+    if (thread_local_bytes < old_thread_local) {
+      thread_local_bytes = old_thread_local;
+    }
+
+    const size_t alloc_bytes =
+        HostThreadTeamData::scratch_size(pool_reduce_bytes, team_reduce_bytes,
+                                         team_shared_bytes, thread_local_bytes);
+
+    void* ptr = nullptr;
+    try {
+      ptr = space.allocate("Kokkos::Serial::scratch_mem", alloc_bytes);
+    } catch (Kokkos::Experimental::RawMemoryAllocationFailure const& failure) {
+      // For now, just rethrow the error message the existing way
+      Kokkos::Impl::throw_runtime_exception(failure.get_error_message());
+    }
+
+    m_thread_team_data.scratch_assign(static_cast<char*>(ptr), alloc_bytes,
+                                      pool_reduce_bytes, team_reduce_bytes,
+                                      team_shared_bytes, thread_local_bytes);
+
+    HostThreadTeamData* pool[1] = {&m_thread_team_data};
+
+    m_thread_team_data.organize_pool(pool, 1);
+    m_thread_team_data.organize_team(1);
+  }
+}
+}  // namespace Impl
+
+Serial::Serial()
+#ifdef KOKKOS_IMPL_WORKAROUND_ICE_IN_TRILINOS_WITH_OLD_INTEL_COMPILERS
+    : m_space_instance(&Impl::SerialInternal::singleton()) {
+}
+#else
+    : m_space_instance(&Impl::SerialInternal::singleton(),
+                       [](Impl::SerialInternal*) {}) {
+}
+#endif
+
+void Serial::print_configuration(std::ostream& os, bool /*verbose*/) const {
+  os << "Host Serial Execution Space:\n";
+  os << "  KOKKOS_ENABLE_SERIAL: yes\n";
+
+  os << "Serial Atomics:\n";
+  os << "  KOKKOS_ENABLE_SERIAL_ATOMICS: ";
+#ifdef KOKKOS_ENABLE_SERIAL_ATOMICS
+  os << "yes\n";
+#else
+  os << "no\n";
+#endif
+
+  os << "\nSerial Runtime Configuration:\n";
+}
+
+bool Serial::impl_is_initialized() {
+  return Impl::SerialInternal::singleton().is_initialized();
+}
+
+void Serial::impl_initialize(InitializationSettings const&) {
+  Impl::SerialInternal::singleton().initialize();
+}
+
+void Serial::impl_finalize() { Impl::SerialInternal::singleton().finalize(); }
+
+const char* Serial::name() { return "Serial"; }
+
+namespace Impl {
+
+int g_serial_space_factory_initialized =
+    initialize_space_factory<Serial>("100_Serial");
+
+}  // namespace Impl
+
+#ifdef KOKKOS_ENABLE_CXX14
+namespace Tools {
+namespace Experimental {
+constexpr DeviceType DeviceTypeTraits<Serial>::id;
+}
+}  // namespace Tools
+#endif
+
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Parallel_MDRange.hpp b/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Parallel_MDRange.hpp
new file mode 100644 (file)
index 0000000..d726a86
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SERIAL_PARALLEL_MDRANGE_HPP
+#define KOKKOS_SERIAL_PARALLEL_MDRANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+                  Kokkos::Serial> {
+ private:
+  using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+  using Policy        = typename MDRangePolicy::impl_range_policy;
+
+  using iterate_type = typename Kokkos::Impl::HostIterateTile<
+      MDRangePolicy, FunctorType, typename MDRangePolicy::work_tag, void>;
+
+  const FunctorType m_functor;
+  const MDRangePolicy m_mdr_policy;
+  const Policy m_policy;
+
+  void exec() const {
+    const typename Policy::member_type e = m_policy.end();
+    for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
+      iterate_type(m_mdr_policy, m_functor)(i);
+    }
+  }
+
+ public:
+  inline void execute() const { this->exec(); }
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy&, const Functor&) {
+    /**
+     * 1024 here is just our guess for a reasonable max tile size;
+     * it isn't a hardware constraint. If people see a use for larger
+     * tile size products, we're happy to change this.
+     */
+    return 1024;
+  }
+  inline ParallelFor(const FunctorType& arg_functor,
+                     const MDRangePolicy& arg_policy)
+      : m_functor(arg_functor),
+        m_mdr_policy(arg_policy),
+        m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)) {}
+};
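+// Editor's note: a minimal usage sketch (not part of the original source),
+// assuming a rank-2 View `a`. With Kokkos::Serial this specialization simply
+// iterates the tiles one after another on the host:
+//
+//   Kokkos::parallel_for(
+//       Kokkos::MDRangePolicy<Kokkos::Serial, Kokkos::Rank<2>>({0, 0},
+//                                                              {m, n}),
+//       KOKKOS_LAMBDA(int i, int j) { a(i, j) = i + j; });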
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
+                     Kokkos::Serial> {
+ private:
+  using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+  using Policy        = typename MDRangePolicy::impl_range_policy;
+
+  using WorkTag = typename MDRangePolicy::work_tag;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
+                         void>;
+
+  using Analysis = FunctorAnalysis<FunctorPatternInterface::REDUCE,
+                                   MDRangePolicy, ReducerTypeFwd>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using value_type     = typename Analysis::value_type;
+  using reference_type = typename Analysis::reference_type;
+
+  using iterate_type =
+      typename Kokkos::Impl::HostIterateTile<MDRangePolicy, FunctorType,
+                                             WorkTag, reference_type>;
+
+  const FunctorType m_functor;
+  const MDRangePolicy m_mdr_policy;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+
+  inline void exec(reference_type update) const {
+    const typename Policy::member_type e = m_policy.end();
+    for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
+      iterate_type(m_mdr_policy, m_functor, update)(i);
+    }
+  }
+
+ public:
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy&, const Functor&) {
+    /**
+     * 1024 here is just our guess for a reasonable max tile size;
+     * it isn't a hardware constraint. If people see a use for larger
+     * tile size products, we're happy to change this.
+     */
+    return 1024;
+  }
+  inline void execute() const {
+    const size_t pool_reduce_size =
+        Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
+    const size_t team_reduce_size  = 0;  // Never shrinks
+    const size_t team_shared_size  = 0;  // Never shrinks
+    const size_t thread_local_size = 0;  // Never shrinks
+
+    auto* internal_instance = m_policy.space().impl_internal_space_instance();
+    // Need to lock resize_thread_team_data
+    std::lock_guard<std::mutex> lock(
+        internal_instance->m_thread_team_data_mutex);
+    internal_instance->resize_thread_team_data(
+        pool_reduce_size, team_reduce_size, team_shared_size,
+        thread_local_size);
+
+    pointer_type ptr =
+        m_result_ptr
+            ? m_result_ptr
+            : pointer_type(
+                  internal_instance->m_thread_team_data.pool_reduce_local());
+
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    reference_type update = final_reducer.init(ptr);
+
+    this->exec(update);
+
+    final_reducer.final(ptr);
+  }
+
+  template <class HostViewType>
+  ParallelReduce(const FunctorType& arg_functor,
+                 const MDRangePolicy& arg_policy,
+                 const HostViewType& arg_result_view,
+                 std::enable_if_t<Kokkos::is_view<HostViewType>::value &&
+                                      !Kokkos::is_reducer<ReducerType>::value,
+                                  void*> = nullptr)
+      : m_functor(arg_functor),
+        m_mdr_policy(arg_policy),
+        m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result_view.data()) {
+    static_assert(Kokkos::is_view<HostViewType>::value,
+                  "Kokkos::Serial reduce result must be a View");
+
+    static_assert(
+        Kokkos::Impl::MemorySpaceAccess<typename HostViewType::memory_space,
+                                        Kokkos::HostSpace>::accessible,
+        "Kokkos::Serial reduce result must be a View in HostSpace");
+  }
+
+  inline ParallelReduce(const FunctorType& arg_functor,
+                        MDRangePolicy arg_policy, const ReducerType& reducer)
+      : m_functor(arg_functor),
+        m_mdr_policy(arg_policy),
+        m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()) {
+    /*static_assert( std::is_same< typename ViewType::memory_space
+                                    , Kokkos::HostSpace >::value
+      , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
+      );*/
+  }
+};
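+// Editor's note: a minimal usage sketch (not part of the original source),
+// assuming a rank-2 View `a`; the reduction result lives in HostSpace, as
+// the static_asserts above require:
+//
+//   double sum = 0.;
+//   Kokkos::parallel_reduce(
+//       Kokkos::MDRangePolicy<Kokkos::Serial, Kokkos::Rank<2>>({0, 0},
+//                                                              {m, n}),
+//       KOKKOS_LAMBDA(int i, int j, double& partial) { partial += a(i, j); },
+//       sum);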
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Parallel_Range.hpp b/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Parallel_Range.hpp
new file mode 100644 (file)
index 0000000..8426222
--- /dev/null
@@ -0,0 +1,337 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SERIAL_PARALLEL_RANGE_HPP
+#define KOKKOS_SERIAL_PARALLEL_RANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>, Kokkos::Serial> {
+ private:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  template <class TagType>
+  std::enable_if_t<std::is_void<TagType>::value> exec() const {
+    const typename Policy::member_type e = m_policy.end();
+    for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
+      m_functor(i);
+    }
+  }
+
+  template <class TagType>
+  std::enable_if_t<!std::is_void<TagType>::value> exec() const {
+    const TagType t{};
+    const typename Policy::member_type e = m_policy.end();
+    for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
+      m_functor(t, i);
+    }
+  }
+
+ public:
+  inline void execute() const {
+    this->template exec<typename Policy::work_tag>();
+  }
+
+  inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+/*--------------------------------------------------------------------------*/
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
+                     Kokkos::Serial> {
+ private:
+  using Policy  = Kokkos::RangePolicy<Traits...>;
+  using WorkTag = typename Policy::work_tag;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
+                         void>;
+
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+
+  template <class TagType>
+  inline std::enable_if_t<std::is_void<TagType>::value> exec(
+      reference_type update) const {
+    const typename Policy::member_type e = m_policy.end();
+    for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
+      m_functor(i, update);
+    }
+  }
+
+  template <class TagType>
+  inline std::enable_if_t<!std::is_void<TagType>::value> exec(
+      reference_type update) const {
+    const TagType t{};
+
+    const typename Policy::member_type e = m_policy.end();
+    for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
+      m_functor(t, i, update);
+    }
+  }
+
+ public:
+  inline void execute() const {
+    const size_t pool_reduce_size =
+        Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
+    const size_t team_reduce_size  = 0;  // Never shrinks
+    const size_t team_shared_size  = 0;  // Never shrinks
+    const size_t thread_local_size = 0;  // Never shrinks
+
+    auto* internal_instance = m_policy.space().impl_internal_space_instance();
+    // Need to lock resize_thread_team_data
+    std::lock_guard<std::mutex> lock(
+        internal_instance->m_thread_team_data_mutex);
+    internal_instance->resize_thread_team_data(
+        pool_reduce_size, team_reduce_size, team_shared_size,
+        thread_local_size);
+
+    pointer_type ptr =
+        m_result_ptr
+            ? m_result_ptr
+            : pointer_type(
+                  internal_instance->m_thread_team_data.pool_reduce_local());
+
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    reference_type update = final_reducer.init(ptr);
+
+    this->template exec<WorkTag>(update);
+
+    final_reducer.final(ptr);
+  }
+
+  template <class HostViewType>
+  ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
+                 const HostViewType& arg_result_view,
+                 std::enable_if_t<Kokkos::is_view<HostViewType>::value &&
+                                      !Kokkos::is_reducer<ReducerType>::value,
+                                  void*> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result_view.data()) {
+    static_assert(Kokkos::is_view<HostViewType>::value,
+                  "Kokkos::Serial reduce result must be a View");
+
+    static_assert(
+        Kokkos::Impl::MemorySpaceAccess<typename HostViewType::memory_space,
+                                        Kokkos::HostSpace>::accessible,
+        "Kokkos::Serial reduce result must be a View in HostSpace");
+  }
+
+  inline ParallelReduce(const FunctorType& arg_functor, Policy arg_policy,
+                        const ReducerType& reducer)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()) {
+    /*static_assert( std::is_same< typename ViewType::memory_space
+                                    , Kokkos::HostSpace >::value
+      , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
+      );*/
+  }
+};
+
+/*--------------------------------------------------------------------------*/
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+                   Kokkos::Serial> {
+ private:
+  using Policy  = Kokkos::RangePolicy<Traits...>;
+  using WorkTag = typename Policy::work_tag;
+
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  template <class TagType>
+  inline std::enable_if_t<std::is_void<TagType>::value> exec(
+      reference_type update) const {
+    const typename Policy::member_type e = m_policy.end();
+    for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
+      m_functor(i, update, true);
+    }
+  }
+
+  template <class TagType>
+  inline std::enable_if_t<!std::is_void<TagType>::value> exec(
+      reference_type update) const {
+    const TagType t{};
+    const typename Policy::member_type e = m_policy.end();
+    for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
+      m_functor(t, i, update, true);
+    }
+  }
+
+ public:
+  inline void execute() const {
+    const size_t pool_reduce_size  = Analysis::value_size(m_functor);
+    const size_t team_reduce_size  = 0;  // Never shrinks
+    const size_t team_shared_size  = 0;  // Never shrinks
+    const size_t thread_local_size = 0;  // Never shrinks
+
+    // Need to lock resize_thread_team_data
+    auto* internal_instance = m_policy.space().impl_internal_space_instance();
+    std::lock_guard<std::mutex> lock(
+        internal_instance->m_thread_team_data_mutex);
+    internal_instance->resize_thread_team_data(
+        pool_reduce_size, team_reduce_size, team_shared_size,
+        thread_local_size);
+
+    typename Analysis::Reducer final_reducer(&m_functor);
+
+    reference_type update = final_reducer.init(pointer_type(
+        internal_instance->m_thread_team_data.pool_reduce_local()));
+
+    this->template exec<WorkTag>(update);
+  }
+
+  inline ParallelScan(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+/*--------------------------------------------------------------------------*/
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+                            ReturnType, Kokkos::Serial> {
+ private:
+  using Policy  = Kokkos::RangePolicy<Traits...>;
+  using WorkTag = typename Policy::work_tag;
+
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::SCAN, Policy, FunctorType>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  ReturnType& m_returnvalue;
+
+  template <class TagType>
+  inline std::enable_if_t<std::is_void<TagType>::value> exec(
+      reference_type update) const {
+    const typename Policy::member_type e = m_policy.end();
+    for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
+      m_functor(i, update, true);
+    }
+  }
+
+  template <class TagType>
+  inline std::enable_if_t<!std::is_void<TagType>::value> exec(
+      reference_type update) const {
+    const TagType t{};
+    const typename Policy::member_type e = m_policy.end();
+    for (typename Policy::member_type i = m_policy.begin(); i < e; ++i) {
+      m_functor(t, i, update, true);
+    }
+  }
+
+ public:
+  inline void execute() {
+    const size_t pool_reduce_size  = Analysis::value_size(m_functor);
+    const size_t team_reduce_size  = 0;  // Never shrinks
+    const size_t team_shared_size  = 0;  // Never shrinks
+    const size_t thread_local_size = 0;  // Never shrinks
+
+    // Need to lock resize_thread_team_data
+    auto* internal_instance = m_policy.space().impl_internal_space_instance();
+    std::lock_guard<std::mutex> lock(
+        internal_instance->m_thread_team_data_mutex);
+    internal_instance->resize_thread_team_data(
+        pool_reduce_size, team_reduce_size, team_shared_size,
+        thread_local_size);
+
+    typename Analysis::Reducer final_reducer(&m_functor);
+
+    reference_type update = final_reducer.init(pointer_type(
+        internal_instance->m_thread_team_data.pool_reduce_local()));
+
+    this->template exec<WorkTag>(update);
+
+    m_returnvalue = update;
+  }
+
+  inline ParallelScanWithTotal(const FunctorType& arg_functor,
+                               const Policy& arg_policy,
+                               ReturnType& arg_returnvalue)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_returnvalue(arg_returnvalue) {}
+};
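+// Editor's note: a minimal usage sketch (not part of the original source),
+// assuming Views `counts` and `offsets` of length `n`. The scan's grand
+// total is returned through the extra argument:
+//
+//   int total = 0;
+//   Kokkos::parallel_scan(
+//       Kokkos::RangePolicy<Kokkos::Serial>(0, n),
+//       KOKKOS_LAMBDA(int i, int& partial, bool final) {
+//         if (final) offsets(i) = partial;
+//         partial += counts(i);
+//       },
+//       total);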
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Parallel_Team.hpp b/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Parallel_Team.hpp
new file mode 100644 (file)
index 0000000..782ae75
--- /dev/null
@@ -0,0 +1,424 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SERIAL_PARALLEL_TEAM_HPP
+#define KOKKOS_SERIAL_PARALLEL_TEAM_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+/*
+ * Specialization forms handled here:
+ *   < Kokkos::Serial , WorkArgTag >
+ *   < WorkArgTag , Impl::enable_if< std::is_same< Kokkos::Serial ,
+ *                  Kokkos::DefaultExecutionSpace >::value >::type >
+ */
+template <class... Properties>
+class TeamPolicyInternal<Kokkos::Serial, Properties...>
+    : public PolicyTraits<Properties...> {
+ private:
+  size_t m_team_scratch_size[2];
+  size_t m_thread_scratch_size[2];
+  int m_league_size;
+  int m_chunk_size;
+
+ public:
+  //! Tag this class as a kokkos execution policy
+  using execution_policy = TeamPolicyInternal;
+
+  using traits = PolicyTraits<Properties...>;
+
+  //! Execution space of this execution policy:
+  using execution_space = Kokkos::Serial;
+
+  const typename traits::execution_space& space() const {
+    static typename traits::execution_space m_space;
+    return m_space;
+  }
+
+  template <class ExecSpace, class... OtherProperties>
+  friend class TeamPolicyInternal;
+
+  template <class... OtherProperties>
+  TeamPolicyInternal(
+      const TeamPolicyInternal<Kokkos::Serial, OtherProperties...>& p) {
+    m_league_size            = p.m_league_size;
+    m_team_scratch_size[0]   = p.m_team_scratch_size[0];
+    m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
+    m_team_scratch_size[1]   = p.m_team_scratch_size[1];
+    m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
+    m_chunk_size             = p.m_chunk_size;
+  }
+
+  //----------------------------------------
+
+  template <class FunctorType>
+  int team_size_max(const FunctorType&, const ParallelForTag&) const {
+    return 1;
+  }
+  template <class FunctorType>
+  int team_size_max(const FunctorType&, const ParallelReduceTag&) const {
+    return 1;
+  }
+  template <class FunctorType, class ReducerType>
+  int team_size_max(const FunctorType&, const ReducerType&,
+                    const ParallelReduceTag&) const {
+    return 1;
+  }
+  template <class FunctorType>
+  int team_size_recommended(const FunctorType&, const ParallelForTag&) const {
+    return 1;
+  }
+  template <class FunctorType>
+  int team_size_recommended(const FunctorType&,
+                            const ParallelReduceTag&) const {
+    return 1;
+  }
+  template <class FunctorType, class ReducerType>
+  int team_size_recommended(const FunctorType&, const ReducerType&,
+                            const ParallelReduceTag&) const {
+    return 1;
+  }
+
+  //----------------------------------------
+
+  inline int team_size() const { return 1; }
+  inline bool impl_auto_team_size() const { return false; }
+  inline bool impl_auto_vector_length() const { return false; }
+  inline void impl_set_team_size(size_t) {}
+  inline void impl_set_vector_length(size_t) {}
+  inline int league_size() const { return m_league_size; }
+  inline size_t scratch_size(const int& level, int = 0) const {
+    return m_team_scratch_size[level] + m_thread_scratch_size[level];
+  }
+
+  inline int impl_vector_length() const { return 1; }
+  inline static int vector_length_max() {
+    return 1024;
+  }  // Use an arbitrarily large number; it is meant as a vectorizable length
+
+  inline static int scratch_size_max(int level) {
+    return (level == 0 ? 1024 * 32 : 20 * 1024 * 1024);
+  }
+  /** \brief  Specify league size, request team size */
+  TeamPolicyInternal(const execution_space&, int league_size_request,
+                     int team_size_request, int /* vector_length_request */ = 1)
+      : m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_league_size(league_size_request),
+        m_chunk_size(32) {
+    if (team_size_request > 1)
+      Kokkos::abort("Kokkos::abort: Requested Team Size is too large!");
+  }
+
+  TeamPolicyInternal(const execution_space& space, int league_size_request,
+                     const Kokkos::AUTO_t& /**team_size_request*/,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(space, league_size_request, -1,
+                           vector_length_request) {}
+
+  TeamPolicyInternal(const execution_space& space, int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */
+                     ,
+                     const Kokkos::AUTO_t& /* vector_length_request */
+                     )
+      : TeamPolicyInternal(space, league_size_request, -1, -1) {}
+
+  TeamPolicyInternal(const execution_space& space, int league_size_request,
+                     int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */
+                     )
+      : TeamPolicyInternal(space, league_size_request, team_size_request, -1) {}
+
+  TeamPolicyInternal(int league_size_request,
+                     const Kokkos::AUTO_t& team_size_request,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(typename traits::execution_space(),
+                           league_size_request, team_size_request,
+                           vector_length_request) {}
+
+  TeamPolicyInternal(int league_size_request,
+                     const Kokkos::AUTO_t& team_size_request,
+                     const Kokkos::AUTO_t& vector_length_request)
+      : TeamPolicyInternal(typename traits::execution_space(),
+                           league_size_request, team_size_request,
+                           vector_length_request) {}
+  TeamPolicyInternal(int league_size_request, int team_size_request,
+                     const Kokkos::AUTO_t& vector_length_request)
+      : TeamPolicyInternal(typename traits::execution_space(),
+                           league_size_request, team_size_request,
+                           vector_length_request) {}
+
+  TeamPolicyInternal(int league_size_request, int team_size_request,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(typename traits::execution_space(),
+                           league_size_request, team_size_request,
+                           vector_length_request) {}
+
+  inline int chunk_size() const { return m_chunk_size; }
+
+  /** \brief set chunk_size to a discrete value*/
+  inline TeamPolicyInternal& set_chunk_size(
+      typename traits::index_type chunk_size_) {
+    m_chunk_size = chunk_size_;
+    return *this;
+  }
+
+  /** \brief set per team scratch size for a specific level of the scratch
+   * hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(const int& level,
+                                              const PerTeamValue& per_team) {
+    m_team_scratch_size[level] = per_team.value;
+    return *this;
+  }
+
+  /** \brief set per thread scratch size for a specific level of the scratch
+   * hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(
+      const int& level, const PerThreadValue& per_thread) {
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+  /** \brief set per thread and per team scratch size for a specific level of
+   * the scratch hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(
+      const int& level, const PerTeamValue& per_team,
+      const PerThreadValue& per_thread) {
+    m_team_scratch_size[level]   = per_team.value;
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+  using member_type = Impl::HostThreadTeamMember<Kokkos::Serial>;
+};
+
+template <class FunctorType, class... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                  Kokkos::Serial> {
+ private:
+  enum { TEAM_REDUCE_SIZE = 512 };
+
+  using Policy = TeamPolicyInternal<Kokkos::Serial, Properties...>;
+  using Member = typename Policy::member_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const int m_league;
+  const size_t m_shared;
+
+  template <class TagType>
+  inline std::enable_if_t<std::is_void<TagType>::value> exec(
+      HostThreadTeamData& data) const {
+    for (int ileague = 0; ileague < m_league; ++ileague) {
+      m_functor(Member(data, ileague, m_league));
+    }
+  }
+
+  template <class TagType>
+  inline std::enable_if_t<!std::is_void<TagType>::value> exec(
+      HostThreadTeamData& data) const {
+    const TagType t{};
+    for (int ileague = 0; ileague < m_league; ++ileague) {
+      m_functor(t, Member(data, ileague, m_league));
+    }
+  }
+
+ public:
+  inline void execute() const {
+    const size_t pool_reduce_size  = 0;  // Never shrinks
+    const size_t team_reduce_size  = TEAM_REDUCE_SIZE;
+    const size_t team_shared_size  = m_shared;
+    const size_t thread_local_size = 0;  // Never shrinks
+
+    auto* internal_instance = m_policy.space().impl_internal_space_instance();
+    // Need to lock resize_thread_team_data
+    std::lock_guard<std::mutex> lock(
+        internal_instance->m_thread_team_data_mutex);
+    internal_instance->resize_thread_team_data(
+        pool_reduce_size, team_reduce_size, team_shared_size,
+        thread_local_size);
+
+    this->template exec<typename Policy::work_tag>(
+        internal_instance->m_thread_team_data);
+  }
+
+  ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_league(arg_policy.league_size()),
+        m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+                 FunctorTeamShmemSize<FunctorType>::value(arg_functor, 1)) {}
+};
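+// Editor's note: a minimal usage sketch (not part of the original source).
+// On Kokkos::Serial the team size must be 1, so a league of `nteams` teams
+// executes sequentially on the host:
+//
+//   using policy_type = Kokkos::TeamPolicy<Kokkos::Serial>;
+//   Kokkos::parallel_for(
+//       policy_type(nteams, 1),
+//       KOKKOS_LAMBDA(const policy_type::member_type& team) {
+//         process_block(team.league_rank());  // hypothetical work function
+//       });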
+
+/*--------------------------------------------------------------------------*/
+
+template <class FunctorType, class ReducerType, class... Properties>
+class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                     ReducerType, Kokkos::Serial> {
+ private:
+  enum { TEAM_REDUCE_SIZE = 512 };
+
+  using Policy = TeamPolicyInternal<Kokkos::Serial, Properties...>;
+
+  using Member  = typename Policy::member_type;
+  using WorkTag = typename Policy::work_tag;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      std::conditional_t<std::is_same<InvalidType, ReducerType>::value, WorkTag,
+                         void>;
+
+  using Analysis =
+      FunctorAnalysis<FunctorPatternInterface::REDUCE, Policy, ReducerTypeFwd>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const int m_league;
+  const ReducerType m_reducer;
+  pointer_type m_result_ptr;
+  size_t m_shared;
+
+  template <class TagType>
+  inline std::enable_if_t<std::is_void<TagType>::value> exec(
+      HostThreadTeamData& data, reference_type update) const {
+    for (int ileague = 0; ileague < m_league; ++ileague) {
+      m_functor(Member(data, ileague, m_league), update);
+    }
+  }
+
+  template <class TagType>
+  inline std::enable_if_t<!std::is_void<TagType>::value> exec(
+      HostThreadTeamData& data, reference_type update) const {
+    const TagType t{};
+
+    for (int ileague = 0; ileague < m_league; ++ileague) {
+      m_functor(t, Member(data, ileague, m_league), update);
+    }
+  }
+
+ public:
+  inline void execute() const {
+    const size_t pool_reduce_size =
+        Analysis::value_size(ReducerConditional::select(m_functor, m_reducer));
+
+    const size_t team_reduce_size  = TEAM_REDUCE_SIZE;
+    const size_t team_shared_size  = m_shared;
+    const size_t thread_local_size = 0;  // Never shrinks
+
+    auto* internal_instance = m_policy.space().impl_internal_space_instance();
+    // Need to lock resize_thread_team_data
+    std::lock_guard<std::mutex> lock(
+        internal_instance->m_thread_team_data_mutex);
+    internal_instance->resize_thread_team_data(
+        pool_reduce_size, team_reduce_size, team_shared_size,
+        thread_local_size);
+
+    pointer_type ptr =
+        m_result_ptr
+            ? m_result_ptr
+            : pointer_type(
+                  internal_instance->m_thread_team_data.pool_reduce_local());
+
+    typename Analysis::Reducer final_reducer(
+        &ReducerConditional::select(m_functor, m_reducer));
+
+    reference_type update = final_reducer.init(ptr);
+
+    this->template exec<WorkTag>(internal_instance->m_thread_team_data, update);
+
+    final_reducer.final(ptr);
+  }
+
+  template <class ViewType>
+  ParallelReduce(const FunctorType& arg_functor, const Policy& arg_policy,
+                 const ViewType& arg_result,
+                 std::enable_if_t<Kokkos::is_view<ViewType>::value &&
+                                      !Kokkos::is_reducer<ReducerType>::value,
+                                  void*> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_league(arg_policy.league_size()),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result.data()),
+        m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+                 FunctorTeamShmemSize<FunctorType>::value(m_functor, 1)) {
+    static_assert(Kokkos::is_view<ViewType>::value,
+                  "Reduction result on Kokkos::Serial must be a Kokkos::View");
+
+    static_assert(
+        Kokkos::Impl::MemorySpaceAccess<typename ViewType::memory_space,
+                                        Kokkos::HostSpace>::accessible,
+        "Reduction result on Kokkos::Serial must be a Kokkos::View in "
+        "HostSpace");
+  }
+
+  inline ParallelReduce(const FunctorType& arg_functor, Policy arg_policy,
+                        const ReducerType& reducer)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_league(arg_policy.league_size()),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_shared(arg_policy.scratch_size(0) + arg_policy.scratch_size(1) +
+                 FunctorTeamShmemSize<FunctorType>::value(arg_functor, 1)) {
+    /*static_assert( std::is_same< typename ViewType::memory_space
+                            , Kokkos::HostSpace >::value
+    , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
+    );*/
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Task.cpp b/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Task.cpp
new file mode 100644 (file)
index 0000000..468f27e
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core.hpp>
+
+#include <Serial/Kokkos_Serial_Task.hpp>
+#include <impl/Kokkos_TaskQueue_impl.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template class TaskQueue<Kokkos::Serial, typename Kokkos::Serial::memory_space>;
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#else
+void KOKKOS_CORE_SRC_IMPL_SERIAL_TASK_PREVENT_LINK_ERROR() {}
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
diff --git a/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Task.hpp b/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_Task.hpp
new file mode 100644 (file)
index 0000000..8d8c1d7
--- /dev/null
@@ -0,0 +1,250 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_SERIAL_TASK_HPP
+#define KOKKOS_IMPL_SERIAL_TASK_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_TaskScheduler_fwd.hpp>
+
+#include <impl/Kokkos_TaskQueue.hpp>
+#include <Kokkos_Serial.hpp>
+#include <impl/Kokkos_HostThreadTeam.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class QueueType>
+class TaskQueueSpecialization<SimpleTaskScheduler<Kokkos::Serial, QueueType>> {
+ public:
+  // Note: Scheduler may be an incomplete type at class scope (but not inside
+  // of the methods, obviously)
+
+  using execution_space = Kokkos::Serial;
+  using memory_space    = Kokkos::HostSpace;
+  using scheduler_type  = SimpleTaskScheduler<Kokkos::Serial, QueueType>;
+  using member_type =
+      TaskTeamMemberAdapter<HostThreadTeamMember<Kokkos::Serial>,
+                            scheduler_type>;
+
+  static void execute(scheduler_type const& scheduler) {
+    using task_base_type = typename scheduler_type::task_base_type;
+
+    auto const& serial_execution_space = scheduler.get_execution_space();
+
+    // Set default buffers
+    serial_execution_space.impl_internal_space_instance()
+        ->resize_thread_team_data(0,   /* global reduce buffer */
+                                  512, /* team reduce buffer */
+                                  0,   /* team shared buffer */
+                                  0    /* thread local buffer */
+        );
+
+    auto& self = serial_execution_space.impl_internal_space_instance()
+                     ->m_thread_team_data;
+
+    auto& queue         = scheduler.queue();
+    auto team_scheduler = scheduler.get_team_scheduler(0);
+
+    member_type member(scheduler, self);
+
+    auto current_task = OptionalRef<task_base_type>(nullptr);
+
+    while (!queue.is_done()) {
+      // Each team lead attempts to acquire either a thread team task
+      // or a single thread task for the team.
+
+      // pop a task off
+      current_task = queue.pop_ready_task(team_scheduler.team_scheduler_info());
+
+      // run the task
+      if (current_task) {
+        current_task->as_runnable_task().run(member);
+        // Respawns are handled in the complete function
+        queue.complete((*std::move(current_task)).as_runnable_task(),
+                       team_scheduler.team_scheduler_info());
+      }
+    }
+  }
+
+  static constexpr uint32_t get_max_team_count(
+      execution_space const&) noexcept {
+    return 1;
+  }
+
+  template <typename TaskType>
+  static void get_function_pointer(typename TaskType::function_type& ptr,
+                                   typename TaskType::destroy_type& dtor) {
+    ptr  = TaskType::apply;
+    dtor = TaskType::destroy;
+  }
+};
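+// This specialization is what ultimately drains the task queue when a task
+// DAG runs on Serial. A hedged usage sketch of the public task-DAG API that
+// funnels into execute() above (MyTask is a hypothetical functor and the
+// memory-pool size is illustrative, not a recommendation):
+//
+//   using sched_t = Kokkos::TaskScheduler<Kokkos::Serial>;
+//   sched_t sched(typename sched_t::memory_space{}, 1024 * 1024);
+//   auto fut = Kokkos::host_spawn(Kokkos::TaskSingle(sched), MyTask{});
+//   Kokkos::wait(sched);  // runs tasks until queue.is_done()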
+
+//----------------------------------------------------------------------------
+
+template <class Scheduler>
+class TaskQueueSpecializationConstrained<
+    Scheduler,
+    std::enable_if_t<std::is_same<typename Scheduler::execution_space,
+                                  Kokkos::Serial>::value>> {
+ public:
+  // Note: Scheduler may be an incomplete type at class scope (but not inside
+  // of the methods, obviously)
+
+  using execution_space = Kokkos::Serial;
+  using memory_space    = Kokkos::HostSpace;
+  using scheduler_type  = Scheduler;
+  using member_type =
+      TaskTeamMemberAdapter<HostThreadTeamMember<Kokkos::Serial>,
+                            scheduler_type>;
+
+  static void iff_single_thread_recursive_execute(
+      scheduler_type const& scheduler) {
+    using task_base_type = TaskBase;
+    using queue_type     = typename scheduler_type::queue_type;
+
+    task_base_type* const end = (task_base_type*)task_base_type::EndTag;
+
+    execution_space serial_execution_space;
+    auto& data = serial_execution_space.impl_internal_space_instance()
+                     ->m_thread_team_data;
+
+    member_type exec(scheduler, data);
+
+    // Loop until no runnable task
+
+    task_base_type* task = end;
+
+    auto* const queue = scheduler.m_queue;
+
+    do {
+      task = end;
+
+      for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
+        for (int j = 0; j < 2 && end == task; ++j) {
+          task = queue_type::pop_ready_task(&queue->m_ready[i][j]);
+        }
+      }
+
+      if (end == task) break;
+
+      (*task->m_apply)(task, &exec);
+
+      queue->complete(task);
+
+    } while (1);
+  }
+
+  static void execute(scheduler_type const& scheduler) {
+    using task_base_type = TaskBase;
+    using queue_type     = typename scheduler_type::queue_type;
+
+    task_base_type* const end = (task_base_type*)task_base_type::EndTag;
+
+    execution_space serial_execution_space;
+
+    // Set default buffers
+    serial_execution_space.impl_internal_space_instance()
+        ->resize_thread_team_data(0,   /* global reduce buffer */
+                                  512, /* team reduce buffer */
+                                  0,   /* team shared buffer */
+                                  0    /* thread local buffer */
+        );
+
+    auto* const queue = scheduler.m_queue;
+
+    auto& data = serial_execution_space.impl_internal_space_instance()
+                     ->m_thread_team_data;
+
+    member_type exec(scheduler, data);
+
+    // Loop until all queues are empty
+    while (0 < queue->m_ready_count) {
+      task_base_type* task = end;
+
+      for (int i = 0; i < queue_type::NumQueue && end == task; ++i) {
+        for (int j = 0; j < 2 && end == task; ++j) {
+          task = queue_type::pop_ready_task(&queue->m_ready[i][j]);
+        }
+      }
+
+      if (end != task) {
+        // pop_ready_task left lock == task->m_next,
+        // i.e., the task is now in the executing state
+
+        (*task->m_apply)(task, &exec);
+
+        // If a respawn then re-enqueue otherwise the task is complete
+        // and all tasks waiting on this task are updated.
+        queue->complete(task);
+      } else if (0 != queue->m_ready_count) {
+        Kokkos::abort("TaskQueue<Serial>::execute ERROR: ready_count");
+      }
+    }
+  }
+
+  template <typename TaskType>
+  static void get_function_pointer(typename TaskType::function_type& ptr,
+                                   typename TaskType::destroy_type& dtor) {
+    ptr  = TaskType::apply;
+    dtor = TaskType::destroy;
+  }
+};
+
+extern template class TaskQueue<Kokkos::Serial,
+                                typename Kokkos::Serial::memory_space>;
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_SERIAL_TASK_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_UniqueToken.hpp b/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_UniqueToken.hpp
new file mode 100644 (file)
index 0000000..cc845f3
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SERIAL_UNIQUE_TOKEN_HPP
+#define KOKKOS_SERIAL_UNIQUE_TOKEN_HPP
+
+#include <Kokkos_UniqueToken.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+template <>
+class UniqueToken<Serial, UniqueTokenScope::Instance> {
+ public:
+  using execution_space = Serial;
+  using size_type       = int;
+
+  /// \brief create an object sized for the concurrency of the given instance
+  ///
+  /// This object should not be shared between instances
+  UniqueToken(execution_space const& = execution_space()) noexcept {}
+
+  /// \brief create an object of the requested size on the given instance
+  ///
+  /// It is the user's responsibility to only acquire `size` tokens
+  /// concurrently
+  UniqueToken(size_type, execution_space const& = execution_space()) {}
+
+  /// \brief upper bound for acquired values, i.e., 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int size() const noexcept { return 1; }
+
+  /// \brief acquire a value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int acquire() const noexcept { return 0; }
+
+  /// \brief release a value obtained from acquire()
+  KOKKOS_INLINE_FUNCTION
+  void release(int) const noexcept {}
+};
+
+template <>
+class UniqueToken<Serial, UniqueTokenScope::Global> {
+ public:
+  using execution_space = Serial;
+  using size_type       = int;
+
+  /// \brief create an object sized for the concurrency of the given instance
+  ///
+  /// This object should not be shared between instances
+  UniqueToken(execution_space const& = execution_space()) noexcept {}
+
+  /// \brief upper bound for acquired values, i.e., 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int size() const noexcept { return 1; }
+
+  /// \brief acquire a value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int acquire() const noexcept { return 0; }
+
+  /// \brief release a value obtained from acquire()
+  KOKKOS_INLINE_FUNCTION
+  void release(int) const noexcept {}
+};
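+// Both Serial specializations are trivial because at most one "thread" ever
+// runs. A usage sketch of the standard UniqueToken pattern (illustration
+// only; `scratch` and `work` are hypothetical per-token resources):
+//
+//   Kokkos::Experimental::UniqueToken<Kokkos::Serial> token;
+//   Kokkos::parallel_for(N, KOKKOS_LAMBDA(const int i) {
+//     const int id = token.acquire();  // always 0 here, since size() == 1
+//     scratch(id) += work(i);          // index per-token resources safely
+//     token.release(id);
+//   });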
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_WorkGraphPolicy.hpp b/bundled/kokkos-3.7.00/core/src/Serial/Kokkos_Serial_WorkGraphPolicy.hpp
new file mode 100644 (file)
index 0000000..0598017
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SERIAL_WORKGRAPHPOLICY_HPP
+#define KOKKOS_SERIAL_WORKGRAPHPOLICY_HPP
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
+                  Kokkos::Serial> {
+ private:
+  using Policy = Kokkos::WorkGraphPolicy<Traits...>;
+
+  Policy m_policy;
+  FunctorType m_functor;
+
+  template <class TagType>
+  std::enable_if_t<std::is_void<TagType>::value> exec_one(
+      const std::int32_t w) const noexcept {
+    m_functor(w);
+  }
+
+  template <class TagType>
+  std::enable_if_t<!std::is_void<TagType>::value> exec_one(
+      const std::int32_t w) const noexcept {
+    const TagType t{};
+    m_functor(t, w);
+  }
+
+ public:
+  inline void execute() const noexcept {
+    // Spin until COMPLETED_TOKEN.
+    // END_TOKEN indicates no work is currently available.
+
+    for (std::int32_t w = Policy::END_TOKEN;
+         Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
+      if (Policy::END_TOKEN != w) {
+        exec_one<typename Policy::work_tag>(w);
+        m_policy.completed_work(w);
+      }
+    }
+  }
+
+  inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_policy(arg_policy), m_functor(arg_functor) {}
+};
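+// pop_work() yields END_TOKEN while no vertex is ready and COMPLETED_TOKEN
+// once every vertex has finished, so execute() spins, runs each ready vertex
+// `w`, then calls completed_work(w) to release its dependents. A hedged usage
+// sketch (assuming a dependence graph `crs` already built in Kokkos::Crs
+// form, and a hypothetical process() kernel):
+//
+//   Kokkos::WorkGraphPolicy<Kokkos::Serial, std::int32_t> policy(crs);
+//   Kokkos::parallel_for("dag", policy,
+//                        KOKKOS_LAMBDA(const std::int32_t w) { process(w); });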
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif /* #define KOKKOS_SERIAL_WORKGRAPHPOLICY_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_ThreadsExec.cpp b/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_ThreadsExec.cpp
new file mode 100644 (file)
index 0000000..346eb1d
--- /dev/null
@@ -0,0 +1,902 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <cstdint>
+#include <limits>
+#include <utility>
+#include <iostream>
+#include <sstream>
+#include <thread>
+#include <mutex>
+
+#include <Kokkos_Core.hpp>
+
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_CPUDiscovery.hpp>
+#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+namespace {
+std::mutex host_internal_cppthread_mutex;
+
+// std::thread compatible driver.
+// Recovery from an exception would require constant intra-thread health
+// verification, which would negatively impact runtime. As such, simply
+// abort the process.
+void internal_cppthread_driver() {
+  try {
+    ThreadsExec::driver();
+  } catch (const std::exception &x) {
+    std::cerr << "Exception thrown from worker thread: " << x.what()
+              << std::endl;
+    std::cerr.flush();
+    std::abort();
+  } catch (...) {
+    std::cerr << "Exception thrown from worker thread" << std::endl;
+    std::cerr.flush();
+    std::abort();
+  }
+}
+
+ThreadsExec s_threads_process;
+ThreadsExec *s_threads_exec[ThreadsExec::MAX_THREAD_COUNT] = {nullptr};
+std::thread::id s_threads_pid[ThreadsExec::MAX_THREAD_COUNT];
+std::pair<unsigned, unsigned> s_threads_coord[ThreadsExec::MAX_THREAD_COUNT];
+
+int s_thread_pool_size[3] = {0, 0, 0};
+
+unsigned s_current_reduce_size = 0;
+unsigned s_current_shared_size = 0;
+
+void (*volatile s_current_function)(ThreadsExec &, const void *);
+const void *volatile s_current_function_arg = nullptr;
+
+struct Sentinel {
+  ~Sentinel() {
+    if (s_thread_pool_size[0] || s_thread_pool_size[1] ||
+        s_thread_pool_size[2] || s_current_reduce_size ||
+        s_current_shared_size || s_current_function || s_current_function_arg ||
+        s_threads_exec[0]) {
+      std::cerr << "ERROR : Process exiting while Kokkos::Threads is still "
+                   "initialized"
+                << std::endl;
+    }
+  }
+};
+
+inline unsigned fan_size(const unsigned rank, const unsigned size) {
+  const unsigned rank_rev = size - (rank + 1);
+  unsigned count          = 0;
+  for (unsigned n = 1; (rank_rev + n < size) && !(rank_rev & n); n <<= 1) {
+    ++count;
+  }
+  return count;
+}
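+// Worked example: fan_size counts this thread's children in the binary
+// fan-in tree rooted at the highest rank. For size == 8: rank 7 (rank_rev 0)
+// has fan size 3 (children at rank_rev 1, 2 and 4), rank 5 (rank_rev 2) has
+// fan size 1 (child at rank_rev 3), and any odd rank_rev has fan size 0.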
+
+}  // namespace
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+// Spawn a thread
+
+void ThreadsExec::spawn() {
+  std::thread t(internal_cppthread_driver);
+  t.detach();
+}
+
+//----------------------------------------------------------------------------
+
+bool ThreadsExec::is_process() {
+  static const std::thread::id master_pid = std::this_thread::get_id();
+
+  return master_pid == std::this_thread::get_id();
+}
+
+void ThreadsExec::global_lock() { host_internal_cppthread_mutex.lock(); }
+
+void ThreadsExec::global_unlock() { host_internal_cppthread_mutex.unlock(); }
+
+//----------------------------------------------------------------------------
+
+void ThreadsExec::wait_yield(volatile int &flag, const int value) {
+  while (value == flag) {
+    std::this_thread::yield();
+  }
+}
+
+void execute_function_noop(ThreadsExec &, const void *) {}
+
+void ThreadsExec::driver() {
+  SharedAllocationRecord<void, void>::tracking_enable();
+
+  ThreadsExec this_thread;
+
+  while (ThreadsExec::Active == this_thread.m_pool_state) {
+    (*s_current_function)(this_thread, s_current_function_arg);
+
+    // Deactivate thread and wait for reactivation
+    this_thread.m_pool_state = ThreadsExec::Inactive;
+
+    wait_yield(this_thread.m_pool_state, ThreadsExec::Inactive);
+  }
+}
+
+ThreadsExec::ThreadsExec()
+    : m_pool_base(nullptr),
+      m_scratch(nullptr),
+      m_scratch_reduce_end(0),
+      m_scratch_thread_end(0),
+      m_numa_rank(0),
+      m_numa_core_rank(0),
+      m_pool_rank(0),
+      m_pool_size(0),
+      m_pool_fan_size(0),
+      m_pool_state(ThreadsExec::Terminating) {
+  if (&s_threads_process != this) {
+    // A spawned thread
+
+    ThreadsExec *const nil = nullptr;
+
+    // Which entry in 's_threads_exec', possibly determined from hwloc binding
+    const int entry = reinterpret_cast<size_t>(s_current_function_arg) <
+                              size_t(s_thread_pool_size[0])
+                          ? reinterpret_cast<size_t>(s_current_function_arg)
+                          : size_t(Kokkos::hwloc::bind_this_thread(
+                                s_thread_pool_size[0], s_threads_coord));
+
+    // Given a good entry set this thread in the 's_threads_exec' array
+    if (entry < s_thread_pool_size[0] &&
+        nil == atomic_compare_exchange(s_threads_exec + entry, nil, this)) {
+      const std::pair<unsigned, unsigned> coord =
+          Kokkos::hwloc::get_this_thread_coordinate();
+
+      m_numa_rank      = coord.first;
+      m_numa_core_rank = coord.second;
+      m_pool_base      = s_threads_exec;
+      m_pool_rank      = s_thread_pool_size[0] - (entry + 1);
+      m_pool_rank_rev  = s_thread_pool_size[0] - (pool_rank() + 1);
+      m_pool_size      = s_thread_pool_size[0];
+      m_pool_fan_size  = fan_size(m_pool_rank, m_pool_size);
+      m_pool_state     = ThreadsExec::Active;
+
+      s_threads_pid[m_pool_rank] = std::this_thread::get_id();
+
+      // Inform spawning process that the threads_exec entry has been set.
+      s_threads_process.m_pool_state = ThreadsExec::Active;
+    } else {
+      // Inform spawning process that the threads_exec entry could not be set.
+      s_threads_process.m_pool_state = ThreadsExec::Terminating;
+    }
+  } else {
+    // Enables 'parallel_for' to execute on an uninitialized Threads device
+    m_pool_rank  = 0;
+    m_pool_size  = 1;
+    m_pool_state = ThreadsExec::Inactive;
+
+    s_threads_pid[m_pool_rank] = std::this_thread::get_id();
+  }
+}
+
+ThreadsExec::~ThreadsExec() {
+  const unsigned entry = m_pool_size - (m_pool_rank + 1);
+
+  using Record = Kokkos::Impl::SharedAllocationRecord<Kokkos::HostSpace, void>;
+
+  if (m_scratch) {
+    Record *const r = Record::get_record(m_scratch);
+
+    m_scratch = nullptr;
+
+    Record::decrement(r);
+  }
+
+  m_pool_base          = nullptr;
+  m_scratch_reduce_end = 0;
+  m_scratch_thread_end = 0;
+  m_numa_rank          = 0;
+  m_numa_core_rank     = 0;
+  m_pool_rank          = 0;
+  m_pool_size          = 0;
+  m_pool_fan_size      = 0;
+
+  m_pool_state = ThreadsExec::Terminating;
+
+  if (&s_threads_process != this && entry < MAX_THREAD_COUNT) {
+    ThreadsExec *const nil = nullptr;
+
+    atomic_compare_exchange(s_threads_exec + entry, this, nil);
+
+    s_threads_process.m_pool_state = ThreadsExec::Terminating;
+  }
+}
+
+int ThreadsExec::get_thread_count() { return s_thread_pool_size[0]; }
+
+ThreadsExec *ThreadsExec::get_thread(const int init_thread_rank) {
+  ThreadsExec *const th =
+      init_thread_rank < s_thread_pool_size[0]
+          ? s_threads_exec[s_thread_pool_size[0] - (init_thread_rank + 1)]
+          : nullptr;
+
+  if (nullptr == th || th->m_pool_rank != init_thread_rank) {
+    std::ostringstream msg;
+    msg << "Kokkos::Impl::ThreadsExec::get_thread ERROR : "
+        << "thread " << init_thread_rank << " of " << s_thread_pool_size[0];
+    if (nullptr == th) {
+      msg << " does not exist";
+    } else {
+      msg << " has wrong thread_rank " << th->m_pool_rank;
+    }
+    Kokkos::Impl::throw_runtime_exception(msg.str());
+  }
+
+  return th;
+}
+
+//----------------------------------------------------------------------------
+
+void ThreadsExec::execute_sleep(ThreadsExec &exec, const void *) {
+  ThreadsExec::global_lock();
+  ThreadsExec::global_unlock();
+
+  const int n        = exec.m_pool_fan_size;
+  const int rank_rev = exec.m_pool_size - (exec.m_pool_rank + 1);
+
+  for (int i = 0; i < n; ++i) {
+    Impl::spinwait_while_equal<int>(
+        exec.m_pool_base[rank_rev + (1 << i)]->m_pool_state,
+        ThreadsExec::Active);
+  }
+
+  exec.m_pool_state = ThreadsExec::Inactive;
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+void ThreadsExec::verify_is_process(const std::string &name,
+                                    const bool initialized) {
+  if (!is_process()) {
+    std::string msg(name);
+    msg.append(
+        " FAILED : Called by a worker thread, can only be called by the master "
+        "process.");
+    Kokkos::Impl::throw_runtime_exception(msg);
+  }
+
+  if (initialized && 0 == s_thread_pool_size[0]) {
+    std::string msg(name);
+    msg.append(" FAILED : Threads not initialized.");
+    Kokkos::Impl::throw_runtime_exception(msg);
+  }
+}
+
+int ThreadsExec::in_parallel() {
+  // A thread function is in execution and
+  // the function argument is not the special threads process argument and
+  // the master process is a worker or is not the master process.
+  return s_current_function && (&s_threads_process != s_current_function_arg) &&
+         (s_threads_process.m_pool_base || !is_process());
+}
+void ThreadsExec::fence() { internal_fence(Impl::fence_is_static::yes); }
+void ThreadsExec::fence(const std::string &name) {
+  internal_fence(name, Impl::fence_is_static::yes);
+}
+
+void ThreadsExec::internal_fence(Impl::fence_is_static is_static) {
+  internal_fence((is_static == Impl::fence_is_static::no)
+                     ? "Kokkos::ThreadsExec::fence: Unnamed Instance Fence"
+                     : "Kokkos::ThreadsExec::fence: Unnamed Static Fence",
+                 is_static);
+}
+
+// Wait for root thread to become inactive
+void ThreadsExec::internal_fence(const std::string &name,
+                                 Impl::fence_is_static is_static) {
+  const auto &fence_lam = [&]() {
+    if (s_thread_pool_size[0]) {
+      // Wait for the root thread to complete:
+      Impl::spinwait_while_equal<int>(s_threads_exec[0]->m_pool_state,
+                                      ThreadsExec::Active);
+    }
+
+    s_current_function     = nullptr;
+    s_current_function_arg = nullptr;
+
+    // Make sure function and arguments are cleared before
+    // potentially re-activating threads with a subsequent launch.
+    memory_fence();
+  };
+  if (is_static == Impl::fence_is_static::yes) {
+    Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Threads>(
+        name,
+        Kokkos::Tools::Experimental::SpecialSynchronizationCases::
+            GlobalDeviceSynchronization,
+        fence_lam);
+  } else {
+    Kokkos::Tools::Experimental::Impl::profile_fence_event<Kokkos::Threads>(
+        name, Kokkos::Tools::Experimental::Impl::DirectFenceIDHandle{1},
+        fence_lam);
+  }
+}
+
+/** \brief  Begin execution of the asynchronous functor */
+void ThreadsExec::start(void (*func)(ThreadsExec &, const void *),
+                        const void *arg) {
+  verify_is_process("ThreadsExec::start", true);
+
+  if (s_current_function || s_current_function_arg) {
+    Kokkos::Impl::throw_runtime_exception(
+        std::string("ThreadsExec::start() FAILED : already executing"));
+  }
+
+  s_current_function     = func;
+  s_current_function_arg = arg;
+
+  // Make sure function and arguments are written before activating threads.
+  memory_fence();
+
+  // Activate threads:
+  for (int i = s_thread_pool_size[0]; 0 < i--;) {
+    s_threads_exec[i]->m_pool_state = ThreadsExec::Active;
+  }
+
+  if (s_threads_process.m_pool_size) {
+    // Master process is the root thread, run it:
+    (*func)(s_threads_process, arg);
+    s_threads_process.m_pool_state = ThreadsExec::Inactive;
+  }
+}
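+// start() is the producer half of a publish-then-activate handshake: write
+// the function pointer and argument, fence, then flip each worker's state.
+// A stripped-down analogue in standard C++ <atomic> (illustration only;
+// `Work` is a hypothetical job type, not a Kokkos type):
+//
+//   std::atomic<Work*> job{nullptr};  // plays the role of s_current_function
+//   std::atomic<int> state{0};        // plays the role of m_pool_state
+//   // master: publish, then activate
+//   job.store(&w, std::memory_order_release);
+//   state.store(1, std::memory_order_release);
+//   // worker: observe activation, after which the job is safe to read
+//   while (state.load(std::memory_order_acquire) != 1) { /* spin */ }
+//   job.load(std::memory_order_relaxed)->run();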
+
+//----------------------------------------------------------------------------
+
+bool ThreadsExec::sleep() {
+  verify_is_process("ThreadsExec::sleep", true);
+
+  if (&execute_sleep == s_current_function) return false;
+
+  fence();
+
+  ThreadsExec::global_lock();
+
+  s_current_function = &execute_sleep;
+
+  // Activate threads:
+  for (unsigned i = s_thread_pool_size[0]; 0 < i;) {
+    s_threads_exec[--i]->m_pool_state = ThreadsExec::Active;
+  }
+
+  return true;
+}
+
+bool ThreadsExec::wake() {
+  verify_is_process("ThreadsExec::wake", true);
+
+  if (&execute_sleep != s_current_function) return false;
+
+  ThreadsExec::global_unlock();
+
+  if (s_threads_process.m_pool_base) {
+    execute_sleep(s_threads_process, nullptr);
+    s_threads_process.m_pool_state = ThreadsExec::Inactive;
+  }
+
+  fence();
+
+  return true;
+}
+
+//----------------------------------------------------------------------------
+
+void ThreadsExec::execute_resize_scratch_in_serial() {
+  const unsigned begin = s_threads_process.m_pool_base ? 1 : 0;
+
+  auto deallocate_scratch_memory = [](ThreadsExec &exec) {
+    if (exec.m_scratch) {
+      using Record =
+          Kokkos::Impl::SharedAllocationRecord<Kokkos::HostSpace, void>;
+      Record *const r = Record::get_record(exec.m_scratch);
+      exec.m_scratch  = nullptr;
+      Record::decrement(r);
+    }
+  };
+  if (s_threads_process.m_pool_base) {
+    for (unsigned i = s_thread_pool_size[0]; begin < i;) {
+      deallocate_scratch_memory(*s_threads_exec[--i]);
+    }
+  }
+
+  s_current_function     = &first_touch_allocate_thread_private_scratch;
+  s_current_function_arg = &s_threads_process;
+
+  // Make sure function and arguments are written before activating threads.
+  memory_fence();
+
+  for (unsigned i = s_thread_pool_size[0]; begin < i;) {
+    ThreadsExec &th = *s_threads_exec[--i];
+
+    th.m_pool_state = ThreadsExec::Active;
+
+    wait_yield(th.m_pool_state, ThreadsExec::Active);
+  }
+
+  if (s_threads_process.m_pool_base) {
+    deallocate_scratch_memory(s_threads_process);
+    s_threads_process.m_pool_state = ThreadsExec::Active;
+    first_touch_allocate_thread_private_scratch(s_threads_process, nullptr);
+    s_threads_process.m_pool_state = ThreadsExec::Inactive;
+  }
+
+  s_current_function_arg = nullptr;
+  s_current_function     = nullptr;
+
+  // Make sure function and arguments are cleared before proceeding.
+  memory_fence();
+}
+
+//----------------------------------------------------------------------------
+
+void *ThreadsExec::root_reduce_scratch() {
+  return s_threads_process.reduce_memory();
+}
+
+void ThreadsExec::first_touch_allocate_thread_private_scratch(ThreadsExec &exec,
+                                                              const void *) {
+  exec.m_scratch_reduce_end = s_threads_process.m_scratch_reduce_end;
+  exec.m_scratch_thread_end = s_threads_process.m_scratch_thread_end;
+
+  if (s_threads_process.m_scratch_thread_end) {
+    // Allocate tracked memory:
+    {
+      using Record =
+          Kokkos::Impl::SharedAllocationRecord<Kokkos::HostSpace, void>;
+      Record *const r =
+          Record::allocate(Kokkos::HostSpace(), "Kokkos::thread_scratch",
+                           s_threads_process.m_scratch_thread_end);
+
+      Record::increment(r);
+
+      exec.m_scratch = r->data();
+    }
+
+    unsigned *ptr = reinterpret_cast<unsigned *>(exec.m_scratch);
+
+    unsigned *const end =
+        ptr + s_threads_process.m_scratch_thread_end / sizeof(unsigned);
+
+    // touch on this thread
+    while (ptr < end) *ptr++ = 0;
+  }
+}
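+// Rationale: the zero-fill loop above is a deliberate "first touch". On NUMA
+// systems the OS typically backs a page with physical memory on the node of
+// the thread that first writes it, so having each worker touch its own
+// scratch keeps that memory local to the worker's NUMA domain.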
+
+void *ThreadsExec::resize_scratch(size_t reduce_size, size_t thread_size) {
+  enum { ALIGN_MASK = Kokkos::Impl::MEMORY_ALIGNMENT - 1 };
+
+  fence();
+
+  const size_t old_reduce_size = s_threads_process.m_scratch_reduce_end;
+  const size_t old_thread_size = s_threads_process.m_scratch_thread_end -
+                                 s_threads_process.m_scratch_reduce_end;
+
+  reduce_size = (reduce_size + ALIGN_MASK) & ~ALIGN_MASK;
+  thread_size = (thread_size + ALIGN_MASK) & ~ALIGN_MASK;
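+  // e.g., if MEMORY_ALIGNMENT == 64 (ALIGN_MASK == 63): 100 -> 128, 128 -> 128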
+
+  // Increase size or deallocate completely.
+
+  if ((old_reduce_size < reduce_size) || (old_thread_size < thread_size) ||
+      ((reduce_size == 0 && thread_size == 0) &&
+       (old_reduce_size != 0 || old_thread_size != 0))) {
+    verify_is_process("ThreadsExec::resize_scratch", true);
+
+    s_threads_process.m_scratch_reduce_end = reduce_size;
+    s_threads_process.m_scratch_thread_end = reduce_size + thread_size;
+
+    execute_resize_scratch_in_serial();
+
+    s_threads_process.m_scratch = s_threads_exec[0]->m_scratch;
+  }
+
+  return s_threads_process.m_scratch;
+}
+
+//----------------------------------------------------------------------------
+
+void ThreadsExec::print_configuration(std::ostream &s, const bool detail) {
+  verify_is_process("ThreadsExec::print_configuration", false);
+
+  fence();
+
+  const unsigned numa_count     = Kokkos::hwloc::get_available_numa_count();
+  const unsigned cores_per_numa = Kokkos::hwloc::get_available_cores_per_numa();
+  const unsigned threads_per_core =
+      Kokkos::hwloc::get_available_threads_per_core();
+
+  // Forestall compiler warnings for unused variables.
+  (void)numa_count;
+  (void)cores_per_numa;
+  (void)threads_per_core;
+
+  s << "Kokkos::Threads";
+
+#if defined(KOKKOS_ENABLE_THREADS)
+  s << " KOKKOS_ENABLE_THREADS";
+#endif
+#if defined(KOKKOS_ENABLE_HWLOC)
+  s << " hwloc[" << numa_count << "x" << cores_per_numa << "x"
+    << threads_per_core << "]";
+#endif
+
+  if (s_thread_pool_size[0]) {
+    s << " threads[" << s_thread_pool_size[0] << "]"
+      << " threads_per_numa[" << s_thread_pool_size[1] << "]"
+      << " threads_per_core[" << s_thread_pool_size[2] << "]";
+    if (nullptr == s_threads_process.m_pool_base) {
+      s << " Asynchronous";
+    }
+    s << " ReduceScratch[" << s_current_reduce_size << "]"
+      << " SharedScratch[" << s_current_shared_size << "]";
+    s << std::endl;
+
+    if (detail) {
+      for (int i = 0; i < s_thread_pool_size[0]; ++i) {
+        ThreadsExec *const th = s_threads_exec[i];
+
+        if (th) {
+          const int rank_rev = th->m_pool_size - (th->m_pool_rank + 1);
+
+          s << " Thread[ " << th->m_pool_rank << " : " << th->m_numa_rank << "."
+            << th->m_numa_core_rank << " ]";
+
+          s << " Fan{";
+          for (int j = 0; j < th->m_pool_fan_size; ++j) {
+            ThreadsExec *const thfan = th->m_pool_base[rank_rev + (1 << j)];
+            s << " [ " << thfan->m_pool_rank << " : " << thfan->m_numa_rank
+              << "." << thfan->m_numa_core_rank << " ]";
+          }
+          s << " }";
+
+          if (th == &s_threads_process) {
+            s << " is_process";
+          }
+        }
+        s << std::endl;
+      }
+    }
+  } else {
+    s << " not initialized" << std::endl;
+  }
+}
+
+//----------------------------------------------------------------------------
+
+int ThreadsExec::is_initialized() { return nullptr != s_threads_exec[0]; }
+
+void ThreadsExec::initialize(int thread_count_arg) {
+  // legacy arguments
+  unsigned thread_count       = thread_count_arg == -1 ? 0 : thread_count_arg;
+  unsigned use_numa_count     = 0;
+  unsigned use_cores_per_numa = 0;
+  bool allow_asynchronous_threadpool = false;
+  // need to provide an initializer for Intel compilers
+  static const Sentinel sentinel = {};
+
+  const bool is_initialized = 0 != s_thread_pool_size[0];
+
+  unsigned thread_spawn_failed = 0;
+
+  for (int i = 0; i < ThreadsExec::MAX_THREAD_COUNT; i++)
+    s_threads_exec[i] = nullptr;
+
+  if (!is_initialized) {
+    // If thread_count, use_numa_count, or use_cores_per_numa are zero
+    // then they will be given default values based upon hwloc detection
+    // and allowed asynchronous execution.
+
+    const bool hwloc_avail = Kokkos::hwloc::available();
+    const bool hwloc_can_bind =
+        hwloc_avail && Kokkos::hwloc::can_bind_threads();
+
+    if (thread_count == 0) {
+      thread_count = hwloc_avail
+                         ? Kokkos::hwloc::get_available_numa_count() *
+                               Kokkos::hwloc::get_available_cores_per_numa() *
+                               Kokkos::hwloc::get_available_threads_per_core()
+                         : 1;
+    }
+
+    const unsigned thread_spawn_begin = hwloc::thread_mapping(
+        "Kokkos::Threads::initialize", allow_asynchronous_threadpool,
+        thread_count, use_numa_count, use_cores_per_numa, s_threads_coord);
+
+    const std::pair<unsigned, unsigned> proc_coord = s_threads_coord[0];
+
+    if (thread_spawn_begin) {
+      // Synchronous with s_threads_coord[0] as the process core
+      // Claim entry #0 for binding the process core.
+      s_threads_coord[0] = std::pair<unsigned, unsigned>(~0u, ~0u);
+    }
+
+    s_thread_pool_size[0] = thread_count;
+    s_thread_pool_size[1] = s_thread_pool_size[0] / use_numa_count;
+    s_thread_pool_size[2] = s_thread_pool_size[1] / use_cores_per_numa;
+    s_current_function =
+        &execute_function_noop;  // Initialization work function
+
+    for (unsigned ith = thread_spawn_begin; ith < thread_count; ++ith) {
+      s_threads_process.m_pool_state = ThreadsExec::Inactive;
+
+      // If hwloc available then spawned thread will
+      // choose its own entry in 's_threads_coord'
+      // otherwise specify the entry.
+      s_current_function_arg =
+          reinterpret_cast<void *>(hwloc_can_bind ? ~0u : ith);
+
+      // Make sure all outstanding memory writes are complete
+      // before spawning the new thread.
+      memory_fence();
+
+      // Spawn thread executing the 'driver()' function.
+      // Wait until spawned thread has attempted to initialize.
+      // If spawning and initialization is successful then
+      // an entry in 's_threads_exec' will be assigned.
+      ThreadsExec::spawn();
+      wait_yield(s_threads_process.m_pool_state, ThreadsExec::Inactive);
+      if (s_threads_process.m_pool_state == ThreadsExec::Terminating) break;
+    }
+
+    // Wait for all spawned threads to deactivate before zeroing the function.
+
+    for (unsigned ith = thread_spawn_begin; ith < thread_count; ++ith) {
+      // Try to protect against cache coherency failure by casting to volatile.
+      ThreadsExec *const th = ((ThreadsExec * volatile *)s_threads_exec)[ith];
+      if (th) {
+        wait_yield(th->m_pool_state, ThreadsExec::Active);
+      } else {
+        ++thread_spawn_failed;
+      }
+    }
+
+    s_current_function             = nullptr;
+    s_current_function_arg         = nullptr;
+    s_threads_process.m_pool_state = ThreadsExec::Inactive;
+
+    memory_fence();
+
+    if (!thread_spawn_failed) {
+      // Bind process to the core on which it was located before spawning
+      // occurred
+      if (hwloc_can_bind) {
+        Kokkos::hwloc::bind_this_thread(proc_coord);
+      }
+
+      if (thread_spawn_begin) {  // Include process in pool.
+        const std::pair<unsigned, unsigned> coord =
+            Kokkos::hwloc::get_this_thread_coordinate();
+
+        s_threads_exec[0]                  = &s_threads_process;
+        s_threads_process.m_numa_rank      = coord.first;
+        s_threads_process.m_numa_core_rank = coord.second;
+        s_threads_process.m_pool_base      = s_threads_exec;
+        s_threads_process.m_pool_rank =
+            thread_count - 1;  // Reversed for scan-compatible reductions
+        s_threads_process.m_pool_size     = thread_count;
+        s_threads_process.m_pool_fan_size = fan_size(
+            s_threads_process.m_pool_rank, s_threads_process.m_pool_size);
+        s_threads_pid[s_threads_process.m_pool_rank] =
+            std::this_thread::get_id();
+      } else {
+        s_threads_process.m_pool_base     = nullptr;
+        s_threads_process.m_pool_rank     = 0;
+        s_threads_process.m_pool_size     = 0;
+        s_threads_process.m_pool_fan_size = 0;
+      }
+
+      // Initial allocations:
+      ThreadsExec::resize_scratch(1024, 1024);
+    } else {
+      s_thread_pool_size[0] = 0;
+      s_thread_pool_size[1] = 0;
+      s_thread_pool_size[2] = 0;
+    }
+  }
+
+  if (is_initialized || thread_spawn_failed) {
+    std::ostringstream msg;
+
+    msg << "Kokkos::Threads::initialize ERROR";
+
+    if (is_initialized) {
+      msg << " : already initialized";
+    }
+    if (thread_spawn_failed) {
+      msg << " : failed to spawn " << thread_spawn_failed << " threads";
+    }
+
+    Kokkos::Impl::throw_runtime_exception(msg.str());
+  }
+
+  // Check for over-subscription
+  if (Kokkos::show_warnings() &&
+      (Impl::mpi_ranks_per_node() * long(thread_count) >
+       Impl::processors_per_node())) {
+    std::cerr << "Kokkos::Threads::initialize WARNING: You are likely "
+                 "oversubscribing your CPU cores."
+              << std::endl;
+    std::cerr << "                                    Detected: "
+              << Impl::processors_per_node() << " cores per node." << std::endl;
+    std::cerr << "                                    Detected: "
+              << Impl::mpi_ranks_per_node() << " MPI_ranks per node."
+              << std::endl;
+    std::cerr << "                                    Requested: "
+              << thread_count << " threads per process." << std::endl;
+  }
+
+  // Init the array for used for arbitrarily sized atomics
+  Impl::init_lock_array_host_space();
+
+  Impl::SharedAllocationRecord<void, void>::tracking_enable();
+}
+
+//----------------------------------------------------------------------------
+
+void ThreadsExec::finalize() {
+  verify_is_process("ThreadsExec::finalize", false);
+
+  fence();
+
+  resize_scratch(0, 0);
+
+  const unsigned begin = s_threads_process.m_pool_base ? 1 : 0;
+
+  for (unsigned i = s_thread_pool_size[0]; begin < i--;) {
+    if (s_threads_exec[i]) {
+      s_threads_exec[i]->m_pool_state = ThreadsExec::Terminating;
+
+      wait_yield(s_threads_process.m_pool_state, ThreadsExec::Inactive);
+
+      s_threads_process.m_pool_state = ThreadsExec::Inactive;
+    }
+
+    s_threads_pid[i] = std::thread::id();
+  }
+
+  if (s_threads_process.m_pool_base) {
+    (&s_threads_process)->~ThreadsExec();
+    s_threads_exec[0] = nullptr;
+  }
+
+  if (Kokkos::hwloc::can_bind_threads()) {
+    Kokkos::hwloc::unbind_this_thread();
+  }
+
+  s_thread_pool_size[0] = 0;
+  s_thread_pool_size[1] = 0;
+  s_thread_pool_size[2] = 0;
+
+  // Reset master thread to run solo.
+  s_threads_process.m_numa_rank      = 0;
+  s_threads_process.m_numa_core_rank = 0;
+  s_threads_process.m_pool_base      = nullptr;
+  s_threads_process.m_pool_rank      = 0;
+  s_threads_process.m_pool_size      = 1;
+  s_threads_process.m_pool_fan_size  = 0;
+  s_threads_process.m_pool_state     = ThreadsExec::Inactive;
+
+  Kokkos::Profiling::finalize();
+}
+
+//----------------------------------------------------------------------------
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+int Threads::concurrency() { return impl_thread_pool_size(0); }
+void Threads::fence(const std::string &name) const {
+  Impl::ThreadsExec::internal_fence(name, Impl::fence_is_static::no);
+}
+
+Threads &Threads::impl_instance(int) {
+  static Threads t;
+  return t;
+}
+
+int Threads::impl_thread_pool_rank_host() {
+  const std::thread::id pid = std::this_thread::get_id();
+  int i                     = 0;
+  while ((i < Impl::s_thread_pool_size[0]) && (pid != Impl::s_threads_pid[i])) {
+    ++i;
+  }
+  return i;
+}
+
+int Threads::impl_thread_pool_size(int depth) {
+  return Impl::s_thread_pool_size[depth];
+}
+
+const char *Threads::name() { return "Threads"; }
+
+namespace Impl {
+
+int g_threads_space_factory_initialized =
+    initialize_space_factory<Threads>("050_Threads");
+
+}  // namespace Impl
+
+#ifdef KOKKOS_ENABLE_CXX14
+namespace Tools {
+namespace Experimental {
+constexpr DeviceType DeviceTypeTraits<Threads>::id;
+}
+}  // namespace Tools
+#endif
+
+} /* namespace Kokkos */
diff --git a/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_ThreadsExec.hpp b/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_ThreadsExec.hpp
new file mode 100644 (file)
index 0000000..238a765
--- /dev/null
@@ -0,0 +1,637 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_THREADSEXEC_HPP
+#define KOKKOS_THREADSEXEC_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <cstdio>
+
+#include <utility>
+#include <impl/Kokkos_Spinwait.hpp>
+
+#include <Kokkos_Atomic.hpp>
+
+#include <impl/Kokkos_ConcurrentBitset.hpp>
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+class ThreadsExec {
+ public:
+  // Fan array has log_2(NT) reduction threads plus 2 scan threads
+  // Currently limited to 16k threads.
+  enum { MAX_FAN_COUNT = 16 };
+  enum { MAX_THREAD_COUNT = 1 << (MAX_FAN_COUNT - 2) };
+  enum { VECTOR_LENGTH = 8 };
+
+  /** \brief States of a worker thread */
+  enum {
+    Terminating,  ///< Termination in progress
+    Inactive,     ///< Exists, waiting for work
+    Active,       ///< Exists, performing work
+    Rendezvous,   ///< Exists, waiting in a barrier or reduce
+    ScanCompleted,
+    ScanAvailable,
+    ReductionAvailable
+  };
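+  // Typical lifecycle: a spawned worker starts Active inside driver(), flips
+  // itself Inactive after each launch, is flipped back to Active by the
+  // master for the next launch, and becomes Terminating at finalize().
+  // Rendezvous and the Scan*/Reduction* states are transient states used by
+  // barrier(), all_reduce() and scan_large().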
+
+ private:
+  friend class Kokkos::Threads;
+
+  // The root of fan-in operations is the highest-ranking thread, so that the
+  // 'scan' reduction can place intermediate values on the threads that need
+  // them. For a simple reduction the root's location is arbitrary.
+
+  ThreadsExec *const *m_pool_base;  ///< Base for pool fan-in
+
+  void *m_scratch;
+  int m_scratch_reduce_end;
+  size_t m_scratch_thread_end;
+  int m_numa_rank;
+  int m_numa_core_rank;
+  int m_pool_rank;
+  int m_pool_rank_rev;
+  int m_pool_size;
+  int m_pool_fan_size;
+  int volatile m_pool_state;  ///< State for global synchronizations
+
+  // Members for dynamic scheduling
+  // Which thread am I stealing from currently
+  int m_current_steal_target;
+  // This thread's owned work_range
+  Kokkos::pair<long, long> m_work_range __attribute__((aligned(16)));
+  // Team Offset if one thread determines work_range for others
+  long m_team_work_index;
+
+  // Is this thread stealing (i.e., is its owned work_range exhausted)?
+  bool m_stealing;
+
+  static void global_lock();
+  static void global_unlock();
+  static void spawn();
+
+  static void first_touch_allocate_thread_private_scratch(ThreadsExec &,
+                                                          const void *);
+  static void execute_sleep(ThreadsExec &, const void *);
+
+  ThreadsExec(const ThreadsExec &);
+  ThreadsExec &operator=(const ThreadsExec &);
+
+  static void execute_resize_scratch_in_serial();
+
+ public:
+  KOKKOS_INLINE_FUNCTION int pool_size() const { return m_pool_size; }
+  KOKKOS_INLINE_FUNCTION int pool_rank() const { return m_pool_rank; }
+  KOKKOS_INLINE_FUNCTION int numa_rank() const { return m_numa_rank; }
+  KOKKOS_INLINE_FUNCTION int numa_core_rank() const { return m_numa_core_rank; }
+  inline long team_work_index() const { return m_team_work_index; }
+
+  static int get_thread_count();
+  static ThreadsExec *get_thread(const int init_thread_rank);
+
+  inline void *reduce_memory() const { return m_scratch; }
+  KOKKOS_INLINE_FUNCTION void *scratch_memory() const {
+    return reinterpret_cast<unsigned char *>(m_scratch) + m_scratch_reduce_end;
+  }
+
+  KOKKOS_INLINE_FUNCTION int volatile &state() { return m_pool_state; }
+  KOKKOS_INLINE_FUNCTION ThreadsExec *const *pool_base() const {
+    return m_pool_base;
+  }
+
+  static void driver(void);
+
+  ~ThreadsExec();
+  ThreadsExec();
+
+  static void *resize_scratch(size_t reduce_size, size_t thread_size);
+
+  static void *root_reduce_scratch();
+
+  static bool is_process();
+
+  static void verify_is_process(const std::string &, const bool initialized);
+
+  static int is_initialized();
+
+  static void initialize(int thread_count);
+
+  static void finalize();
+
+  /* Given a requested team size, return valid team size */
+  static unsigned team_size_valid(unsigned);
+
+  static void print_configuration(std::ostream &, const bool detail = false);
+
+  //------------------------------------
+
+  static void wait_yield(volatile int &, const int);
+
+  //------------------------------------
+  // All-thread functions:
+
+  inline int all_reduce(const int value) {
+    // Make sure there is enough scratch space:
+    const int rev_rank = m_pool_size - (m_pool_rank + 1);
+
+    *static_cast<volatile int *>(reduce_memory()) = value;
+
+    memory_fence();
+
+    // Fan-in reduction with highest ranking thread as the root
+    for (int i = 0; i < m_pool_fan_size; ++i) {
+      // Wait: Active -> Rendezvous
+      Impl::spinwait_while_equal<int>(
+          m_pool_base[rev_rank + (1 << i)]->m_pool_state, ThreadsExec::Active);
+    }
+
+    if (rev_rank) {
+      m_pool_state = ThreadsExec::Rendezvous;
+      // Wait: Rendezvous -> Active
+      Impl::spinwait_while_equal<int>(m_pool_state, ThreadsExec::Rendezvous);
+    } else {
+      // Root thread does the reduction and broadcast
+
+      int accum = 0;
+
+      for (int rank = 0; rank < m_pool_size; ++rank) {
+        accum +=
+            *static_cast<volatile int *>(get_thread(rank)->reduce_memory());
+      }
+
+      for (int rank = 0; rank < m_pool_size; ++rank) {
+        *static_cast<volatile int *>(get_thread(rank)->reduce_memory()) = accum;
+      }
+
+      memory_fence();
+
+      for (int rank = 0; rank < m_pool_size; ++rank) {
+        get_thread(rank)->m_pool_state = ThreadsExec::Active;
+      }
+    }
+
+    return *static_cast<volatile int *>(reduce_memory());
+  }
+
+  inline void barrier() {
+    // Make sure there is enough scratch space:
+    const int rev_rank = m_pool_size - (m_pool_rank + 1);
+
+    memory_fence();
+
+    // Fan-in reduction with highest ranking thread as the root
+    for (int i = 0; i < m_pool_fan_size; ++i) {
+      // Wait: Active -> Rendezvous
+      Impl::spinwait_while_equal<int>(
+          m_pool_base[rev_rank + (1 << i)]->m_pool_state, ThreadsExec::Active);
+    }
+
+    if (rev_rank) {
+      m_pool_state = ThreadsExec::Rendezvous;
+      // Wait: Rendezvous -> Active
+      Impl::spinwait_while_equal<int>(m_pool_state, ThreadsExec::Rendezvous);
+    } else {
+      // Root thread does the reduction and broadcast
+
+      memory_fence();
+
+      for (int rank = 0; rank < m_pool_size; ++rank) {
+        get_thread(rank)->m_pool_state = ThreadsExec::Active;
+      }
+    }
+  }
+
+  //------------------------------------
+  // All-thread functions:
+
+  template <class FunctorType>
+  inline void fan_in_reduce(const FunctorType &f) const {
+    const int rev_rank = m_pool_size - (m_pool_rank + 1);
+
+    for (int i = 0; i < m_pool_fan_size; ++i) {
+      ThreadsExec &fan = *m_pool_base[rev_rank + (1 << i)];
+
+      Impl::spinwait_while_equal<int>(fan.m_pool_state, ThreadsExec::Active);
+
+      f.join(
+          reinterpret_cast<typename FunctorType::value_type *>(reduce_memory()),
+          reinterpret_cast<const typename FunctorType::value_type *>(
+              fan.reduce_memory()));
+    }
+
+    if (!rev_rank) {
+      f.final(reinterpret_cast<typename FunctorType::value_type *>(
+          reduce_memory()));
+    }
+
+    //  This thread has updated 'reduce_memory()' and upon returning
+    //  from this function will set 'm_pool_state' to inactive.
+    //  If this is a non-root thread then setting 'm_pool_state'
+    //  to inactive triggers another thread to exit a spinwait
+    //  and read the 'reduce_memory'.
+    //  Must 'memory_fence()' to guarantee that storing the update to
+    //  'reduce_memory()' will complete before storing the update to
+    //  'm_pool_state'.
+
+    memory_fence();
+  }
+
+  inline void fan_in() const {
+    const int rev_rank = m_pool_size - (m_pool_rank + 1);
+
+    for (int i = 0; i < m_pool_fan_size; ++i) {
+      Impl::spinwait_while_equal<int>(
+          m_pool_base[rev_rank + (1 << i)]->m_pool_state, ThreadsExec::Active);
+    }
+  }
+
+  template <class FunctorType>
+  inline void scan_large(const FunctorType &f) {
+    // Sequence of states:
+    //  0) Active             : entry and exit state
+    //  1) ReductionAvailable : reduction value available
+    //  2) ScanAvailable      : inclusive scan value available
+    //  3) Rendezvous         : All threads inclusive scan value are available
+    //  4) ScanCompleted      : exclusive scan value copied
+
+    using scalar_type = typename FunctorType::value_type;
+
+    const int rev_rank   = m_pool_size - (m_pool_rank + 1);
+    const unsigned count = FunctorType::value_count(f);
+
+    scalar_type *const work_value = (scalar_type *)reduce_memory();
+
+    //--------------------------------
+    // Fan-in reduction with highest ranking thread as the root
+    for (int i = 0; i < m_pool_fan_size; ++i) {
+      ThreadsExec &fan = *m_pool_base[rev_rank + (1 << i)];
+
+      // Wait: Active -> ReductionAvailable (or ScanAvailable)
+      Impl::spinwait_while_equal<int>(fan.m_pool_state, ThreadsExec::Active);
+      f.join(work_value, fan.reduce_memory());
+    }
+
+    // Copy reduction value to scan value before releasing from this phase.
+    for (unsigned i = 0; i < count; ++i) {
+      work_value[i + count] = work_value[i];
+    }
+
+    if (rev_rank) {
+      // Set: Active -> ReductionAvailable
+      m_pool_state = ThreadsExec::ReductionAvailable;
+
+      // Wait for contributing threads' scan value to be available.
+      if ((1 << m_pool_fan_size) < (m_pool_rank + 1)) {
+        ThreadsExec &th = *m_pool_base[rev_rank + (1 << m_pool_fan_size)];
+
+        // Wait: Active             -> ReductionAvailable
+        // Wait: ReductionAvailable -> ScanAvailable
+        Impl::spinwait_while_equal<int>(th.m_pool_state, ThreadsExec::Active);
+        Impl::spinwait_while_equal<int>(th.m_pool_state,
+                                        ThreadsExec::ReductionAvailable);
+
+        f.join(work_value + count, ((scalar_type *)th.reduce_memory()) + count);
+      }
+
+      // This thread has completed inclusive scan
+      // Set: ReductionAvailable -> ScanAvailable
+      m_pool_state = ThreadsExec::ScanAvailable;
+
+      // Wait for all threads to complete inclusive scan
+      // Wait: ScanAvailable -> Rendezvous
+      Impl::spinwait_while_equal<int>(m_pool_state, ThreadsExec::ScanAvailable);
+    }
+
+    //--------------------------------
+
+    for (int i = 0; i < m_pool_fan_size; ++i) {
+      ThreadsExec &fan = *m_pool_base[rev_rank + (1 << i)];
+      // Wait: ReductionAvailable -> ScanAvailable
+      Impl::spinwait_while_equal<int>(fan.m_pool_state,
+                                      ThreadsExec::ReductionAvailable);
+      // Set: ScanAvailable -> Rendezvous
+      fan.m_pool_state = ThreadsExec::Rendezvous;
+    }
+
+    // All threads have completed the inclusive scan.
+    // All non-root threads are in the Rendezvous state.
+    // Threads are free to overwrite their reduction value.
+    //--------------------------------
+
+    if ((rev_rank + 1) < m_pool_size) {
+      // Exclusive scan: copy the previous thread's inclusive scan value
+
+      ThreadsExec &th = *m_pool_base[rev_rank + 1];  // Not the root thread
+
+      const scalar_type *const src_value =
+          ((scalar_type *)th.reduce_memory()) + count;
+
+      for (unsigned j = 0; j < count; ++j) {
+        work_value[j] = src_value[j];
+      }
+    } else {
+      f.init(work_value);
+    }
+
+    //--------------------------------
+    // Wait for all threads to copy previous thread's inclusive scan value
+    // Wait for all threads: Rendezvous -> ScanCompleted
+    for (int i = 0; i < m_pool_fan_size; ++i) {
+      Impl::spinwait_while_equal<int>(
+          m_pool_base[rev_rank + (1 << i)]->m_pool_state,
+          ThreadsExec::Rendezvous);
+    }
+    if (rev_rank) {
+      // Set: ScanAvailable -> ScanCompleted
+      m_pool_state = ThreadsExec::ScanCompleted;
+      // Wait: ScanCompleted -> Active
+      Impl::spinwait_while_equal<int>(m_pool_state, ThreadsExec::ScanCompleted);
+    }
+    // Set: ScanCompleted -> Active
+    for (int i = 0; i < m_pool_fan_size; ++i) {
+      m_pool_base[rev_rank + (1 << i)]->m_pool_state = ThreadsExec::Active;
+    }
+  }
+
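The net effect of scan_large() is an exclusive prefix sum over the per-thread reduction values, with the root holding the scan total. A serial sketch of the value each rank ends up with (illustration only; the contributions are placeholders):

#include <cstdio>

int main() {
  const int contrib[4] = {3, 1, 4, 1};  // placeholder per-thread reductions
  int accum = 0;
  for (int rank = 0; rank < 4; ++rank) {
    const int exclusive = accum;  // the value thread 'rank' receives
    accum += contrib[rank];
    std::printf("rank %d: exclusive = %d\n", rank, exclusive);
  }
  std::printf("scan total = %d\n", accum);  // prints 9
  return 0;
}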
+  template <class FunctorType>
+  inline void scan_small(const FunctorType &f) {
+    using scalar_type = typename FunctorType::value_type;
+
+    const int rev_rank   = m_pool_size - (m_pool_rank + 1);
+    const unsigned count = f.length();
+
+    scalar_type *const work_value = (scalar_type *)reduce_memory();
+
+    //--------------------------------
+    // Fan-in reduction with highest ranking thread as the root
+    for (int i = 0; i < m_pool_fan_size; ++i) {
+      // Wait: Active -> Rendezvous
+      Impl::spinwait_while_equal<int>(
+          m_pool_base[rev_rank + (1 << i)]->m_pool_state, ThreadsExec::Active);
+    }
+
+    for (unsigned i = 0; i < count; ++i) {
+      work_value[i + count] = work_value[i];
+    }
+
+    if (rev_rank) {
+      m_pool_state = ThreadsExec::Rendezvous;
+      // Wait: Rendezvous -> Active
+      Impl::spinwait_while_equal<int>(m_pool_state, ThreadsExec::Rendezvous);
+    } else {
+      // Root thread does the thread-scan before releasing threads
+
+      scalar_type *ptr_prev = nullptr;
+
+      for (int rank = 0; rank < m_pool_size; ++rank) {
+        scalar_type *const ptr =
+            (scalar_type *)get_thread(rank)->reduce_memory();
+        if (rank) {
+          for (unsigned i = 0; i < count; ++i) {
+            ptr[i] = ptr_prev[i + count];
+          }
+          f.join(ptr + count, ptr);
+        } else {
+          f.init(ptr);
+        }
+        ptr_prev = ptr;
+      }
+    }
+
+    for (int i = 0; i < m_pool_fan_size; ++i) {
+      m_pool_base[rev_rank + (1 << i)]->m_pool_state = ThreadsExec::Active;
+    }
+  }
+
+  //------------------------------------
+  /** \brief  Wait for previous asynchronous functor to
+   *          complete and release the Threads device.
+   *          Acquire the Threads device and start this functor.
+   */
+  static void start(void (*)(ThreadsExec &, const void *), const void *);
+
+  static int in_parallel();
+  static void fence();
+  static void fence(const std::string &);
+  static void internal_fence(
+      Impl::fence_is_static is_static = Impl::fence_is_static::yes);
+  static void internal_fence(
+      const std::string &,
+      Impl::fence_is_static is_static = Impl::fence_is_static::yes);
+  static bool sleep();
+  static bool wake();
+
+  /* Dynamic Scheduling related functionality */
+  // Initialize the work range for this thread
+  inline void set_work_range(const long &begin, const long &end,
+                             const long &chunk_size) {
+    m_work_range.first = (begin + chunk_size - 1) / chunk_size;
+    m_work_range.second =
+        end > 0 ? (end + chunk_size - 1) / chunk_size : m_work_range.first;
+  }
+
+  // Claim an index from this thread's range from the beginning
+  inline long get_work_index_begin() {
+    Kokkos::pair<long, long> work_range_new = m_work_range;
+    Kokkos::pair<long, long> work_range_old = work_range_new;
+    if (work_range_old.first >= work_range_old.second) return -1;
+
+    work_range_new.first += 1;
+
+    bool success = false;
+    while (!success) {
+      work_range_new = Kokkos::atomic_compare_exchange(
+          &m_work_range, work_range_old, work_range_new);
+      success        = ((work_range_new == work_range_old) ||
+                 (work_range_new.first >= work_range_new.second));
+      work_range_old = work_range_new;
+      work_range_new.first += 1;
+    }
+    if (work_range_old.first < work_range_old.second)
+      return work_range_old.first;
+    else
+      return -1;
+  }
+
+  // Claim an index from this thread's range from the end
+  inline long get_work_index_end() {
+    Kokkos::pair<long, long> work_range_new = m_work_range;
+    Kokkos::pair<long, long> work_range_old = work_range_new;
+    if (work_range_old.first >= work_range_old.second) return -1;
+    work_range_new.second -= 1;
+    bool success = false;
+    while (!success) {
+      work_range_new = Kokkos::atomic_compare_exchange(
+          &m_work_range, work_range_old, work_range_new);
+      success        = ((work_range_new == work_range_old) ||
+                 (work_range_new.first >= work_range_new.second));
+      work_range_old = work_range_new;
+      work_range_new.second -= 1;
+    }
+    if (work_range_old.first < work_range_old.second)
+      return work_range_old.second - 1;
+    else
+      return -1;
+  }
+
+  // Reset the steal target
+  inline void reset_steal_target() {
+    m_current_steal_target = (m_pool_rank + 1) % pool_size();
+    m_stealing             = false;
+  }
+
+  // Reset the steal target
+  inline void reset_steal_target(int team_size) {
+    m_current_steal_target = (m_pool_rank_rev + team_size);
+    if (m_current_steal_target >= pool_size())
+      m_current_steal_target = 0;  // pool_size()-1;
+    m_stealing = false;
+  }
+
+  // Get a steal target; start with my rank + 1 and go round-robin until
+  // arriving back at this thread's rank. Returns -1 if no active steal
+  // target is available.
+  inline int get_steal_target() {
+    while ((m_pool_base[m_current_steal_target]->m_work_range.second <=
+            m_pool_base[m_current_steal_target]->m_work_range.first) &&
+           (m_current_steal_target != m_pool_rank)) {
+      m_current_steal_target = (m_current_steal_target + 1) % pool_size();
+    }
+    if (m_current_steal_target == m_pool_rank)
+      return -1;
+    else
+      return m_current_steal_target;
+  }
+
+  inline int get_steal_target(int team_size) {
+    while ((m_pool_base[m_current_steal_target]->m_work_range.second <=
+            m_pool_base[m_current_steal_target]->m_work_range.first) &&
+           (m_current_steal_target != m_pool_rank_rev)) {
+      if (m_current_steal_target + team_size < pool_size())
+        m_current_steal_target = (m_current_steal_target + team_size);
+      else
+        m_current_steal_target = 0;
+    }
+
+    if (m_current_steal_target == m_pool_rank_rev)
+      return -1;
+    else
+      return m_current_steal_target;
+  }
+
+  inline long steal_work_index(int team_size = 0) {
+    long index = -1;
+    int steal_target =
+        team_size > 0 ? get_steal_target(team_size) : get_steal_target();
+    while ((steal_target != -1) && (index == -1)) {
+      index = m_pool_base[steal_target]->get_work_index_end();
+      if (index == -1)
+        steal_target =
+            team_size > 0 ? get_steal_target(team_size) : get_steal_target();
+    }
+    return index;
+  }
+
+  // Get a work index. Claim from the owned range until it is exhausted, then
+  // steal from another thread.
+  inline long get_work_index(int team_size = 0) {
+    long work_index = -1;
+    if (!m_stealing) work_index = get_work_index_begin();
+
+    if (work_index == -1) {
+      memory_fence();
+      m_stealing = true;
+      work_index = steal_work_index(team_size);
+    }
+
+    m_team_work_index = work_index;
+    memory_fence();
+    return work_index;
+  }
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+inline int Threads::in_parallel() { return Impl::ThreadsExec::in_parallel(); }
+
+inline int Threads::impl_is_initialized() {
+  return Impl::ThreadsExec::is_initialized();
+}
+
+inline void Threads::impl_initialize(InitializationSettings const &settings) {
+  Impl::ThreadsExec::initialize(
+      settings.has_num_threads() ? settings.get_num_threads() : -1);
+}
+
+inline void Threads::impl_finalize() { Impl::ThreadsExec::finalize(); }
+
+inline void Threads::print_configuration(std::ostream &os, bool verbose) const {
+  os << "Host Parallel Execution Space:\n";
+  os << "  KOKKOS_ENABLE_THREADS: yes\n";
+
+  os << "\nThreads Runtime Configuration:\n";
+  Impl::ThreadsExec::print_configuration(os, verbose);
+}
+
+inline void Threads::impl_static_fence(const std::string &name) {
+  Impl::ThreadsExec::internal_fence(name, Impl::fence_is_static::yes);
+}
+} /* namespace Kokkos */
+
+#endif /* #define KOKKOS_THREADSEXEC_HPP */
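Taken together, set_work_range(), get_work_index_begin(), get_work_index_end() and the steal helpers above implement a small work-stealing scheme: the owning thread claims chunk indices from the front of its range, thieves take from the back, and the two ends only meet when the range is exhausted. A minimal mutex-based sketch of that idea (illustration only; the code above uses a lock-free atomic_compare_exchange on the pair instead of a lock):

#include <mutex>
#include <utility>

struct WorkRange {
  std::pair<long, long> range{0, 0};  // [first, second): unclaimed chunks
  std::mutex m;

  long claim_begin() {  // the owner claims from the front
    std::lock_guard<std::mutex> lock(m);
    return range.first < range.second ? range.first++ : -1;
  }
  long steal_end() {  // a thief steals from the back
    std::lock_guard<std::mutex> lock(m);
    return range.first < range.second ? --range.second : -1;
  }
};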
diff --git a/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_ThreadsTeam.hpp b/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_ThreadsTeam.hpp
new file mode 100644 (file)
index 0000000..02ce932
--- /dev/null
@@ -0,0 +1,1131 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_THREADSTEAM_HPP
+#define KOKKOS_THREADSTEAM_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <cstdio>
+
+#include <utility>
+#include <impl/Kokkos_Spinwait.hpp>
+#include <impl/Kokkos_HostThreadTeam.hpp>
+
+#include <Kokkos_Atomic.hpp>
+
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+
+template <class>
+struct ThreadsExecAdapter;
+
+//----------------------------------------------------------------------------
+
+class ThreadsExecTeamMember {
+ private:
+  enum { TEAM_REDUCE_SIZE = 512 };
+
+ public:
+  using execution_space      = Kokkos::Threads;
+  using scratch_memory_space = execution_space::scratch_memory_space;
+
+ private:
+  using space = execution_space::scratch_memory_space;
+  ThreadsExec* const m_exec;
+  ThreadsExec* const* m_team_base;  ///< Base for team fan-in
+  space m_team_shared;
+  size_t m_team_shared_size;
+  int m_team_size;
+  int m_team_rank;
+  int m_team_rank_rev;
+  int m_league_size;
+  int m_league_end;
+  int m_league_rank;
+
+  int m_chunk_size;
+  int m_league_chunk_end;
+
+  int m_invalid_thread;
+  int m_team_alloc;
+
+  inline void set_team_shared() {
+    new (&m_team_shared) space(
+        static_cast<char*>((*m_team_base)->scratch_memory()) + TEAM_REDUCE_SIZE,
+        m_team_shared_size);
+  }
+
+ public:
+  // Fan-in and wait until the matching fan-out is called.
+  // The root thread which does not wait will return true.
+  // All other threads will return false during the fan-out.
+  KOKKOS_INLINE_FUNCTION bool team_fan_in() const {
+    int n, j;
+
+    // Wait for fan-in threads
+    for (n = 1;
+         (!(m_team_rank_rev & n)) && ((j = m_team_rank_rev + n) < m_team_size);
+         n <<= 1) {
+      Impl::spinwait_while_equal<int>(m_team_base[j]->state(),
+                                      ThreadsExec::Active);
+    }
+
+    // If not root then wait for release
+    if (m_team_rank_rev) {
+      m_exec->state() = ThreadsExec::Rendezvous;
+      Impl::spinwait_while_equal<int>(m_exec->state(), ThreadsExec::Rendezvous);
+    }
+
+    return !m_team_rank_rev;
+  }
+
+  KOKKOS_INLINE_FUNCTION void team_fan_out() const {
+    int n, j;
+    for (n = 1;
+         (!(m_team_rank_rev & n)) && ((j = m_team_rank_rev + n) < m_team_size);
+         n <<= 1) {
+      m_team_base[j]->state() = ThreadsExec::Active;
+    }
+  }
+
+ public:
+  KOKKOS_INLINE_FUNCTION static int team_reduce_size() {
+    return TEAM_REDUCE_SIZE;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& team_shmem() const {
+    return m_team_shared.set_team_thread_mode(0, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& team_scratch(int) const {
+    return m_team_shared.set_team_thread_mode(0, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const execution_space::scratch_memory_space& thread_scratch(int) const {
+    return m_team_shared.set_team_thread_mode(0, team_size(), team_rank());
+  }
+
+  KOKKOS_INLINE_FUNCTION int league_rank() const { return m_league_rank; }
+  KOKKOS_INLINE_FUNCTION int league_size() const { return m_league_size; }
+  KOKKOS_INLINE_FUNCTION int team_rank() const { return m_team_rank; }
+  KOKKOS_INLINE_FUNCTION int team_size() const { return m_team_size; }
+
+  KOKKOS_INLINE_FUNCTION void team_barrier() const {
+    team_fan_in();
+    team_fan_out();
+  }
+
+  template <class ValueType>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(ValueType& value,
+                                             const int& thread_id) const {
+    KOKKOS_IF_ON_DEVICE(((void)value; (void)thread_id;))
+
+    KOKKOS_IF_ON_HOST((
+        // Make sure there is enough scratch space:
+        using type = typename if_c<sizeof(ValueType) < TEAM_REDUCE_SIZE,
+                                   ValueType, void>::type;
+
+        if (m_team_base) {
+          type* const local_value = ((type*)m_team_base[0]->scratch_memory());
+          memory_fence();
+          team_barrier();
+          if (team_rank() == thread_id) *local_value = value;
+          memory_fence();
+          team_barrier();
+          value = *local_value;
+        }))
+  }
+
+  template <class Closure, class ValueType>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(Closure const& f, ValueType& value,
+                                             const int& thread_id) const {
+    KOKKOS_IF_ON_DEVICE(((void)f; (void)value; (void)thread_id;))
+
+    KOKKOS_IF_ON_HOST((
+        // Make sure there is enough scratch space:
+        using type = typename if_c<sizeof(ValueType) < TEAM_REDUCE_SIZE,
+                                   ValueType, void>::type;
+        f(value); if (m_team_base) {
+          type* const local_value = ((type*)m_team_base[0]->scratch_memory());
+          memory_fence();
+          team_barrier();
+          if (team_rank() == thread_id) *local_value = value;
+          memory_fence();
+          team_barrier();
+          value = *local_value;
+        }))
+  }
+
+  template <typename Type>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<!Kokkos::is_reducer<Type>::value, Type>
+      team_reduce(const Type& value) const {
+    KOKKOS_IF_ON_DEVICE((return value;))
+
+    KOKKOS_IF_ON_HOST((
+        // Make sure there is enough scratch space:
+        using type =
+            typename if_c<sizeof(Type) < TEAM_REDUCE_SIZE, Type, void>::type;
+
+        if (nullptr == m_exec) return value;
+
+        if (team_rank() != team_size() - 1) *
+            ((volatile type*)m_exec->scratch_memory()) = value;
+
+        memory_fence();
+
+        type& accum = *((type*)m_team_base[0]->scratch_memory());
+
+        if (team_fan_in()) {
+          accum = value;
+          for (int i = 1; i < m_team_size; ++i) {
+            accum += *((type*)m_team_base[i]->scratch_memory());
+          }
+          memory_fence();
+        }
+
+        team_fan_out();
+
+        return accum;))
+  }
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+  team_reduce(ReducerType const& reducer) const noexcept {
+    team_reduce(reducer, reducer.reference());
+  }
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION
+      std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+      team_reduce(const ReducerType& reducer,
+                  const typename ReducerType::value_type contribution) const {
+    KOKKOS_IF_ON_DEVICE(((void)reducer; (void)contribution;))
+
+    KOKKOS_IF_ON_HOST((
+        using value_type = typename ReducerType::value_type;
+        // Make sure there is enough scratch space:
+        using type = typename if_c<sizeof(value_type) < TEAM_REDUCE_SIZE,
+                                   value_type, void>::type;
+
+        if (nullptr == m_exec) return;
+
+        type* const local_value = ((type*)m_exec->scratch_memory());
+
+        // Set this thread's contribution
+        if (team_rank() != team_size() - 1) { *local_value = contribution; }
+
+        // Fence to make sure the base team member has access:
+        memory_fence();
+
+        if (team_fan_in()) {
+          // The last thread to synchronize returns true; all other threads
+          // wait for team_fan_out().
+          type* const team_value = ((type*)m_team_base[0]->scratch_memory());
+
+          *team_value = contribution;
+          // Join to the team value:
+          for (int i = 1; i < m_team_size; ++i) {
+            reducer.join(*team_value,
+                         *((type*)m_team_base[i]->scratch_memory()));
+          }
+
+          // Team base thread may "lap" member threads so copy out to their
+          // local value.
+          for (int i = 1; i < m_team_size; ++i) {
+            *((type*)m_team_base[i]->scratch_memory()) = *team_value;
+          }
+
+          // Fence to make sure all team members have access
+          memory_fence();
+        }
+
+        team_fan_out();
+
+        // Value was changed by the team base
+        reducer.reference() = *local_value;))
+  }
+
+  /** \brief  Intra-team exclusive prefix sum with team_rank() ordering,
+   *          with non-deterministically ordered inter-team accumulation.
+   *
+   *  The global inter-team accumulation value will, at the end of the
+   *  league's parallel execution, be the scan's total.
+   *  Parallel execution ordering of the league's teams is non-deterministic.
+   *  As such the base value for each team's scan operation is similarly
+   *  non-deterministic.
+   */
+  template <typename ArgType>
+  KOKKOS_INLINE_FUNCTION ArgType team_scan(const ArgType& value,
+                                           ArgType* const global_accum) const {
+    KOKKOS_IF_ON_DEVICE(((void)global_accum; return value;))
+
+    KOKKOS_IF_ON_HOST((  // Make sure there is enough scratch space:
+        using type = typename if_c<sizeof(ArgType) < TEAM_REDUCE_SIZE, ArgType,
+                                   void>::type;
+
+        if (nullptr == m_exec) return type(0);
+
+        volatile type* const work_value = ((type*)m_exec->scratch_memory());
+
+        *work_value = value;
+
+        memory_fence();
+
+        if (team_fan_in()) {
+          // The last thread to synchronize returns true; all other threads
+          // wait for team_fan_out().
+          //   m_team_base[0]                 == highest ranking team member
+          //   m_team_base[m_team_size - 1]   == lowest ranking team member
+          //
+          // 1) copy from lower to higher rank, initialize lowest rank to zero
+          // 2) prefix sum from lowest to highest rank, skipping lowest rank
+
+          type accum = 0;
+
+          if (global_accum) {
+            for (int i = m_team_size; i--;) {
+              type& val = *((type*)m_team_base[i]->scratch_memory());
+              accum += val;
+            }
+            accum = atomic_fetch_add(global_accum, accum);
+          }
+
+          for (int i = m_team_size; i--;) {
+            type& val         = *((type*)m_team_base[i]->scratch_memory());
+            const type offset = accum;
+            accum += val;
+            val = offset;
+          }
+
+          memory_fence();
+        }
+
+        team_fan_out();
+
+        return *work_value;))
+  }
+
+  /** \brief  Intra-team exclusive prefix sum with team_rank() ordering.
+   *
+   *  The highest rank thread can compute the reduction total as
+   *    reduction_total = dev.team_scan( value ) + value ;
+   */
+  template <typename ArgType>
+  KOKKOS_INLINE_FUNCTION ArgType team_scan(const ArgType& value) const {
+    return this->template team_scan<ArgType>(value, nullptr);
+  }
+
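A hypothetical usage sketch of the identity above (assumes a build with the Threads backend enabled; the per-thread contribution is a placeholder):

#include <Kokkos_Core.hpp>
#include <cstdio>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    using policy_type = Kokkos::TeamPolicy<Kokkos::Threads>;
    Kokkos::parallel_for(
        policy_type(1, Kokkos::AUTO),
        KOKKOS_LAMBDA(const policy_type::member_type& team) {
          const int value  = team.team_rank() + 1;   // placeholder input
          const int offset = team.team_scan(value);  // exclusive prefix sum
          if (team.team_rank() == team.team_size() - 1)
            std::printf("reduction total = %d\n", offset + value);
        });
  }
  Kokkos::finalize();
  return 0;
}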
+  //----------------------------------------
+  // Private for the driver
+
+  template <class... Properties>
+  ThreadsExecTeamMember(
+      Impl::ThreadsExec* exec,
+      const TeamPolicyInternal<Kokkos::Threads, Properties...>& team,
+      const size_t shared_size)
+      : m_exec(exec),
+        m_team_base(nullptr),
+        m_team_shared(nullptr, 0),
+        m_team_shared_size(shared_size),
+        m_team_size(team.team_size()),
+        m_team_rank(0),
+        m_team_rank_rev(0),
+        m_league_size(0),
+        m_league_end(0),
+        m_league_rank(0),
+        m_chunk_size(team.chunk_size()),
+        m_league_chunk_end(0),
+        m_team_alloc(team.team_alloc()) {
+    if (team.league_size()) {
+      // Execution is using device-team interface:
+
+      const int pool_rank_rev = m_exec->pool_size() - (m_exec->pool_rank() + 1);
+      const int team_rank_rev = pool_rank_rev % team.team_alloc();
+      const size_t pool_league_size = m_exec->pool_size() / team.team_alloc();
+      const size_t pool_league_rank_rev = pool_rank_rev / team.team_alloc();
+      if (pool_league_rank_rev >= pool_league_size) {
+        m_invalid_thread = 1;
+        return;
+      }
+      const size_t pool_league_rank =
+          pool_league_size - (pool_league_rank_rev + 1);
+
+      const int pool_num_teams = m_exec->pool_size() / team.team_alloc();
+      const int chunk_size =
+          team.chunk_size() > 0 ? team.chunk_size() : team.team_iter();
+      const int chunks_per_team =
+          (team.league_size() + chunk_size * pool_num_teams - 1) /
+          (chunk_size * pool_num_teams);
+      int league_iter_end = team.league_size() -
+                            pool_league_rank_rev * chunks_per_team * chunk_size;
+      int league_iter_begin = league_iter_end - chunks_per_team * chunk_size;
+      if (league_iter_begin < 0) league_iter_begin = 0;
+      if (league_iter_end > team.league_size())
+        league_iter_end = team.league_size();
+
+      if ((team.team_alloc() > size_t(m_team_size))
+              ? (team_rank_rev >= m_team_size)
+              : (m_exec->pool_size() - pool_num_teams * m_team_size >
+                 m_exec->pool_rank()))
+        m_invalid_thread = 1;
+      else
+        m_invalid_thread = 0;
+
+      // May be using fewer threads per team than a multiple of threads per
+      // core, some threads will idle.
+
+      if (team_rank_rev < team.team_size() && !m_invalid_thread) {
+        m_team_base =
+            m_exec->pool_base() + team.team_alloc() * pool_league_rank_rev;
+        m_team_size     = team.team_size();
+        m_team_rank     = team.team_size() - (team_rank_rev + 1);
+        m_team_rank_rev = team_rank_rev;
+        m_league_size   = team.league_size();
+
+        m_league_rank =
+            (team.league_size() * pool_league_rank) / pool_league_size;
+        m_league_end =
+            (team.league_size() * (pool_league_rank + 1)) / pool_league_size;
+
+        set_team_shared();
+      }
+
+      if ((m_team_rank_rev == 0) && (m_invalid_thread == 0)) {
+        m_exec->set_work_range(m_league_rank, m_league_end, m_chunk_size);
+        m_exec->reset_steal_target(m_team_size);
+      }
+      if (std::is_same<typename TeamPolicyInternal<
+                           Kokkos::Threads, Properties...>::schedule_type::type,
+                       Kokkos::Dynamic>::value) {
+        m_exec->barrier();
+      }
+    } else {
+      m_invalid_thread = 1;
+    }
+  }
+
+  ThreadsExecTeamMember()
+      : m_exec(nullptr),
+        m_team_base(nullptr),
+        m_team_shared(nullptr, 0),
+        m_team_shared_size(0),
+        m_team_size(1),
+        m_team_rank(0),
+        m_team_rank_rev(0),
+        m_league_size(1),
+        m_league_end(0),
+        m_league_rank(0),
+        m_chunk_size(0),
+        m_league_chunk_end(0),
+        m_invalid_thread(0),
+        m_team_alloc(0) {}
+
+  inline ThreadsExec& threads_exec_team_base() const {
+    return m_team_base ? **m_team_base : *m_exec;
+  }
+
+  bool valid_static() const { return m_league_rank < m_league_end; }
+
+  void next_static() {
+    if (m_league_rank < m_league_end) {
+      // Make sure all stores are complete before entering the barrier
+      memory_fence();
+      team_barrier();
+      set_team_shared();
+    }
+    m_league_rank++;
+  }
+
+  bool valid_dynamic() {
+    if (m_invalid_thread) return false;
+    if ((m_league_rank < m_league_chunk_end) &&
+        (m_league_rank < m_league_size)) {
+      return true;
+    }
+
+    if (m_team_rank_rev == 0) {
+      m_team_base[0]->get_work_index(m_team_alloc);
+    }
+    team_barrier();
+
+    long work_index = m_team_base[0]->team_work_index();
+
+    m_league_rank      = work_index * m_chunk_size;
+    m_league_chunk_end = (work_index + 1) * m_chunk_size;
+
+    if (m_league_chunk_end > m_league_size) m_league_chunk_end = m_league_size;
+
+    if ((m_league_rank >= 0) && (m_league_rank < m_league_chunk_end))
+      return true;
+    return false;
+  }
+
+  void next_dynamic() {
+    if (m_invalid_thread) return;
+
+    if (m_league_rank < m_league_chunk_end) {
+      // Make sure all stores are complete before entering the barrier
+      memory_fence();
+      team_barrier();
+      set_team_shared();
+    }
+    m_league_rank++;
+  }
+
+  void set_league_shmem(const int arg_league_rank, const int arg_league_size,
+                        const size_t arg_shmem_size) {
+    m_league_rank      = arg_league_rank;
+    m_league_size      = arg_league_size;
+    m_team_shared_size = arg_shmem_size;
+    set_team_shared();
+  }
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+template <class... Properties>
+class TeamPolicyInternal<Kokkos::Threads, Properties...>
+    : public PolicyTraits<Properties...> {
+ private:
+  int m_league_size;
+  int m_team_size;
+  int m_team_alloc;
+  int m_team_iter;
+
+  size_t m_team_scratch_size[2];
+  size_t m_thread_scratch_size[2];
+
+  int m_chunk_size;
+
+  bool m_tune_team_size;
+  bool m_tune_vector_length;
+
+  inline void init(const int league_size_request, const int team_size_request) {
+    const int pool_size = traits::execution_space::impl_thread_pool_size(0);
+    const int max_host_team_size = Impl::HostThreadTeamData::max_team_members;
+    const int team_max =
+        pool_size < max_host_team_size ? pool_size : max_host_team_size;
+    const int team_grain = traits::execution_space::impl_thread_pool_size(2);
+
+    m_league_size = league_size_request;
+
+    if (team_size_request > team_max)
+      Kokkos::abort("Kokkos::abort: Requested Team Size is too large!");
+
+    m_team_size = team_size_request < team_max ? team_size_request : team_max;
+
+    // Round team size up to a multiple of 'team_grain'
+    const int team_size_grain =
+        (m_team_size + team_grain - 1 <= 0)
+            ? 1
+            : team_grain * ((m_team_size + team_grain - 1) / team_grain);
+    const int team_count = pool_size / team_size_grain;
+
+    // Constraint : pool_size = m_team_alloc * team_count
+    m_team_alloc = pool_size / team_count;
+
+    // Maximum number of iterations each team will take:
+    m_team_iter = (m_league_size + team_count - 1) / team_count;
+
+    set_auto_chunk_size();
+  }
+
+ public:
+  //! Tag this class as a kokkos execution policy
+  using execution_policy = TeamPolicyInternal;
+
+  using traits = PolicyTraits<Properties...>;
+
+  const typename traits::execution_space& space() const {
+    static typename traits::execution_space m_space;
+    return m_space;
+  }
+
+  template <class ExecSpace, class... OtherProperties>
+  friend class TeamPolicyInternal;
+
+  template <class... OtherProperties>
+  TeamPolicyInternal(
+      const TeamPolicyInternal<Kokkos::Threads, OtherProperties...>& p) {
+    m_league_size            = p.m_league_size;
+    m_team_size              = p.m_team_size;
+    m_team_alloc             = p.m_team_alloc;
+    m_team_iter              = p.m_team_iter;
+    m_team_scratch_size[0]   = p.m_team_scratch_size[0];
+    m_thread_scratch_size[0] = p.m_thread_scratch_size[0];
+    m_team_scratch_size[1]   = p.m_team_scratch_size[1];
+    m_thread_scratch_size[1] = p.m_thread_scratch_size[1];
+    m_chunk_size             = p.m_chunk_size;
+    m_tune_team_size         = p.m_tune_team_size;
+    m_tune_vector_length     = p.m_tune_vector_length;
+  }
+
+  //----------------------------------------
+
+  template <class FunctorType>
+  int team_size_max(const FunctorType&, const ParallelForTag&) const {
+    int pool_size          = traits::execution_space::impl_thread_pool_size(1);
+    int max_host_team_size = Impl::HostThreadTeamData::max_team_members;
+    return pool_size < max_host_team_size ? pool_size : max_host_team_size;
+  }
+  template <class FunctorType>
+  int team_size_max(const FunctorType&, const ParallelReduceTag&) const {
+    int pool_size          = traits::execution_space::impl_thread_pool_size(1);
+    int max_host_team_size = Impl::HostThreadTeamData::max_team_members;
+    return pool_size < max_host_team_size ? pool_size : max_host_team_size;
+  }
+  template <class FunctorType, class ReducerType>
+  inline int team_size_max(const FunctorType& f, const ReducerType&,
+                           const ParallelReduceTag& t) const {
+    return team_size_max(f, t);
+  }
+  template <class FunctorType>
+  int team_size_recommended(const FunctorType&, const ParallelForTag&) const {
+    return traits::execution_space::impl_thread_pool_size(2);
+  }
+  template <class FunctorType>
+  int team_size_recommended(const FunctorType&,
+                            const ParallelReduceTag&) const {
+    return traits::execution_space::impl_thread_pool_size(2);
+  }
+  template <class FunctorType, class ReducerType>
+  inline int team_size_recommended(const FunctorType& f, const ReducerType&,
+                                   const ParallelReduceTag& t) const {
+    return team_size_recommended(f, t);
+  }
+
+  inline static int vector_length_max() {
+    return 1024;
+  }  // Use an arbitrarily large number; this is meant as a vectorizable length
+
+  inline static int scratch_size_max(int level) {
+    return (level == 0 ? 1024 * 32 :  // Roughly L1 size
+                20 * 1024 * 1024);    // Limit to keep compatibility with CUDA
+  }
+
+  //----------------------------------------
+
+  inline int team_size() const { return m_team_size; }
+  inline int impl_vector_length() const { return 1; }
+  inline size_t team_alloc() const { return m_team_alloc; }
+  inline int league_size() const { return m_league_size; }
+
+  inline bool impl_auto_team_size() const { return m_tune_team_size; }
+  inline bool impl_auto_vector_length() const { return m_tune_vector_length; }
+  inline void impl_set_team_size(size_t size) { init(m_league_size, size); }
+  inline void impl_set_vector_length(size_t /**size*/) {}
+  inline size_t scratch_size(const int& level, int team_size_ = -1) const {
+    if (team_size_ < 0) team_size_ = m_team_size;
+    return m_team_scratch_size[level] +
+           team_size_ * m_thread_scratch_size[level];
+  }
+
+  inline int team_iter() const { return m_team_iter; }
+
+  /** \brief  Specify league size, request team size */
+  TeamPolicyInternal(const typename traits::execution_space&,
+                     int league_size_request, int team_size_request,
+                     int vector_length_request = 1)
+      : m_league_size(0),
+        m_team_size(0),
+        m_team_alloc(0),
+        m_team_scratch_size{0, 0},
+        m_thread_scratch_size{0, 0},
+        m_chunk_size(0),
+        m_tune_team_size(false),
+        m_tune_vector_length(false) {
+    init(league_size_request, team_size_request);
+    (void)vector_length_request;
+  }
+
+  /** \brief  Specify league size, request team size and vector length*/
+  TeamPolicyInternal(const typename traits::execution_space& space,
+                     int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */
+                     ,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(space, league_size_request, -1, -1) {}
+
+  /** \brief  Specify league size, request team size*/
+  TeamPolicyInternal(const typename traits::execution_space& space,
+                     int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */
+                     ,
+                     int vector_length_request)
+      : TeamPolicyInternal(space, league_size_request, -1,
+                           vector_length_request) {}
+
+  /** \brief  Specify league size and team size, request vector length*/
+  TeamPolicyInternal(const typename traits::execution_space& space,
+                     int league_size_request, int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(space, league_size_request, team_size_request, -1) {}
+
+  TeamPolicyInternal(int league_size_request, int team_size_request,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(typename traits::execution_space(),
+                           league_size_request, team_size_request,
+                           vector_length_request) {}
+
+  TeamPolicyInternal(int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */
+                     ,
+                     int vector_length_request = 1)
+      : TeamPolicyInternal(typename traits::execution_space(),
+                           league_size_request, -1, vector_length_request) {}
+
+  /** \brief  Specify league size, request team size and vector length*/
+  TeamPolicyInternal(int league_size_request,
+                     const Kokkos::AUTO_t& /* team_size_request */
+                     ,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(typename traits::execution_space(),
+                           league_size_request, -1, -1) {}
+
+  /** \brief  Specify league size and team size, request vector length*/
+  TeamPolicyInternal(int league_size_request, int team_size_request,
+                     const Kokkos::AUTO_t& /* vector_length_request */)
+      : TeamPolicyInternal(typename traits::execution_space(),
+                           league_size_request, team_size_request, -1) {}
+
+  inline int chunk_size() const { return m_chunk_size; }
+
+  /** \brief set chunk_size to a discrete value*/
+  inline TeamPolicyInternal& set_chunk_size(
+      typename traits::index_type chunk_size_) {
+    m_chunk_size = chunk_size_;
+    return *this;
+  }
+
+  /** \brief set per team scratch size for a specific level of the scratch
+   * hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(const int& level,
+                                              const PerTeamValue& per_team) {
+    m_team_scratch_size[level] = per_team.value;
+    return *this;
+  }
+
+  /** \brief set per thread scratch size for a specific level of the scratch
+   * hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(
+      const int& level, const PerThreadValue& per_thread) {
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+  /** \brief set per thread and per team scratch size for a specific level of
+   * the scratch hierarchy */
+  inline TeamPolicyInternal& set_scratch_size(
+      const int& level, const PerTeamValue& per_team,
+      const PerThreadValue& per_thread) {
+    m_team_scratch_size[level]   = per_team.value;
+    m_thread_scratch_size[level] = per_thread.value;
+    return *this;
+  }
+
+ private:
+  /** \brief finalize chunk_size if it was set to AUTO*/
+  inline void set_auto_chunk_size() {
+    int64_t concurrency = traits::execution_space::concurrency() / m_team_alloc;
+    if (concurrency == 0) concurrency = 1;
+
+    if (m_chunk_size > 0) {
+      if (!Impl::is_integral_power_of_two(m_chunk_size))
+        Kokkos::abort("TeamPolicy blocking granularity must be power of two");
+    }
+
+    int new_chunk_size = 1;
+    while (new_chunk_size * 100 * concurrency < m_league_size)
+      new_chunk_size *= 2;
+    if (new_chunk_size < 128) {
+      new_chunk_size = 1;
+      while ((new_chunk_size * 40 * concurrency < m_league_size) &&
+             (new_chunk_size < 128))
+        new_chunk_size *= 2;
+    }
+    m_chunk_size = new_chunk_size;
+  }
+
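To make the heuristic above concrete, a standalone reproduction that maps league sizes to the chunk size set_auto_chunk_size() would pick (illustration only; the concurrency value is a placeholder):

#include <cstdio>

int auto_chunk(long league_size, long concurrency) {
  if (concurrency == 0) concurrency = 1;
  int chunk = 1;
  while (chunk * 100 * concurrency < league_size) chunk *= 2;
  if (chunk < 128) {
    chunk = 1;
    while (chunk * 40 * concurrency < league_size && chunk < 128) chunk *= 2;
  }
  return chunk;
}

int main() {
  for (long n : {100L, 100000L, 10000000L})
    std::printf("league %ld -> chunk %d\n", n, auto_chunk(n, 8));
  return 0;
}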
+ public:
+  using member_type = Impl::ThreadsExecTeamMember;
+
+  friend class Impl::ThreadsExecTeamMember;
+};
+
+} /*namespace Impl */
+} /* namespace Kokkos */
+
+namespace Kokkos {
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::TeamThreadRangeBoundariesStruct<iType, Impl::ThreadsExecTeamMember>
+    TeamThreadRange(const Impl::ThreadsExecTeamMember& thread,
+                    const iType& count) {
+  return Impl::TeamThreadRangeBoundariesStruct<iType,
+                                               Impl::ThreadsExecTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::ThreadsExecTeamMember>
+TeamThreadRange(const Impl::ThreadsExecTeamMember& thread, const iType1& begin,
+                const iType2& end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::TeamThreadRangeBoundariesStruct<iType,
+                                               Impl::ThreadsExecTeamMember>(
+      thread, iType(begin), iType(end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::TeamThreadRangeBoundariesStruct<iType, Impl::ThreadsExecTeamMember>
+    TeamVectorRange(const Impl::ThreadsExecTeamMember& thread,
+                    const iType& count) {
+  return Impl::TeamThreadRangeBoundariesStruct<iType,
+                                               Impl::ThreadsExecTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::ThreadsExecTeamMember>
+TeamVectorRange(const Impl::ThreadsExecTeamMember& thread, const iType1& begin,
+                const iType2& end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::TeamThreadRangeBoundariesStruct<iType,
+                                               Impl::ThreadsExecTeamMember>(
+      thread, iType(begin), iType(end));
+}
+
+template <typename iType>
+KOKKOS_INLINE_FUNCTION
+    Impl::ThreadVectorRangeBoundariesStruct<iType, Impl::ThreadsExecTeamMember>
+    ThreadVectorRange(const Impl::ThreadsExecTeamMember& thread,
+                      const iType& count) {
+  return Impl::ThreadVectorRangeBoundariesStruct<iType,
+                                                 Impl::ThreadsExecTeamMember>(
+      thread, count);
+}
+
+template <typename iType1, typename iType2>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Impl::ThreadsExecTeamMember>
+ThreadVectorRange(const Impl::ThreadsExecTeamMember& thread,
+                  const iType1& arg_begin, const iType2& arg_end) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::ThreadVectorRangeBoundariesStruct<iType,
+                                                 Impl::ThreadsExecTeamMember>(
+      thread, iType(arg_begin), iType(arg_end));
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::ThreadSingleStruct<Impl::ThreadsExecTeamMember> PerTeam(
+    const Impl::ThreadsExecTeamMember& thread) {
+  return Impl::ThreadSingleStruct<Impl::ThreadsExecTeamMember>(thread);
+}
+
+KOKKOS_INLINE_FUNCTION
+Impl::VectorSingleStruct<Impl::ThreadsExecTeamMember> PerThread(
+    const Impl::ThreadsExecTeamMember& thread) {
+  return Impl::VectorSingleStruct<Impl::ThreadsExecTeamMember>(thread);
+}
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+/** \brief  Inter-thread parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
+    const Lambda& lambda) {
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment)
+    lambda(i);
+}
+
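A hypothetical usage sketch for the overload above (assumes a Threads-enabled build; the view name and extents are placeholders):

#include <Kokkos_Core.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    using policy_type = Kokkos::TeamPolicy<Kokkos::Threads>;
    const int league_size = 4, n = 100;  // placeholder extents
    Kokkos::View<double**, Kokkos::HostSpace> a("a", league_size, n);
    Kokkos::parallel_for(
        policy_type(league_size, Kokkos::AUTO),
        KOKKOS_LAMBDA(const policy_type::member_type& team) {
          const int row = team.league_rank();
          // The inner range distributes the n columns over the team.
          Kokkos::parallel_for(Kokkos::TeamThreadRange(team, n),
                               [&](const int col) { a(row, col) = row + col; });
        });
  }
  Kokkos::finalize();
  return 0;
}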
+/** \brief  Inter-thread parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all threads of the calling thread team
+ * and a summation of val is performed and put into result.
+ */
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+                    iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
+                const Lambda& lambda, ValueType& result) {
+  ValueType intermediate;
+  Sum<ValueType> sum(intermediate);
+  sum.init(intermediate);
+
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    ValueType tmp = ValueType();
+    lambda(i, tmp);
+    intermediate += tmp;
+  }
+
+  loop_boundaries.thread.team_reduce(sum, intermediate);
+  result = sum.reference();
+}
+
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::TeamThreadRangeBoundariesStruct<
+                    iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
+                const Lambda& lambda, const ReducerType& reducer) {
+  typename ReducerType::value_type value;
+  reducer.init(value);
+
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, value);
+  }
+
+  loop_boundaries.thread.team_reduce(reducer, value);
+}
+
+}  // namespace Kokkos
+
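A hypothetical end-to-end sketch of the team-level reduction above (assumes a Threads-enabled build; the summand is a placeholder):

#include <Kokkos_Core.hpp>
#include <cstdio>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    using policy_type = Kokkos::TeamPolicy<Kokkos::Threads>;
    const int n = 1000;  // placeholder length
    double sum = 0.0;
    Kokkos::parallel_reduce(
        policy_type(1, Kokkos::AUTO),
        KOKKOS_LAMBDA(const policy_type::member_type& team, double& update) {
          double team_sum = 0.0;
          Kokkos::parallel_reduce(
              Kokkos::TeamThreadRange(team, n),
              [&](const int i, double& partial) { partial += 1.0 / (1 + i); },
              team_sum);
          Kokkos::single(Kokkos::PerTeam(team),
                         [&]() { update += team_sum; });
        },
        sum);
    std::printf("sum = %f\n", sum);
  }
  Kokkos::finalize();
  return 0;
}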
+namespace Kokkos {
+/** \brief  Intra-thread vector parallel_for. Executes lambda(iType i) for each
+ * i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling thread.
+ */
+template <typename iType, class Lambda>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
+    const Lambda& lambda) {
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment)
+    lambda(i);
+}
+
+/** \brief  Intra-thread vector parallel_reduce. Executes lambda(iType i,
+ * ValueType & val) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes of the calling thread
+ * and a summation of val is performed and put into result.
+ */
+template <typename iType, class Lambda, typename ValueType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<!Kokkos::is_reducer<ValueType>::value>
+parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
+                    iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
+                const Lambda& lambda, ValueType& result) {
+  result = ValueType();
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, result);
+  }
+}
+
+template <typename iType, class Lambda, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
+                    iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
+                const Lambda& lambda, const ReducerType& reducer) {
+  reducer.init(reducer.reference());
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, reducer.reference());
+  }
+}
+
+/** \brief  Inter-thread parallel exclusive prefix sum. Executes
+ * lambda(iType i, ValueType & val, bool final) for each i=0..N-1.
+ *
+ */
+template <typename iType, class FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::TeamThreadRangeBoundariesStruct<
+        iType, Impl::ThreadsExecTeamMember>& loop_bounds,
+    const FunctorType& lambda) {
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Kokkos::Impl::FunctorPatternInterface::SCAN, void,
+      FunctorType>::value_type;
+
+  auto scan_val = value_type{};
+
+  // Intra-member scan
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_bounds.start; i < loop_bounds.end;
+       i += loop_bounds.increment) {
+    lambda(i, scan_val, false);
+  }
+
+  // 'scan_val' output is the exclusive prefix sum
+  scan_val = loop_bounds.thread.team_scan(scan_val);
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_bounds.start; i < loop_bounds.end;
+       i += loop_bounds.increment) {
+    lambda(i, scan_val, true);
+  }
+}
+
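A hypothetical sketch of the two-pass contract described above: the functor adds its contribution to the partial value on every call and may only consume it when final == true (assumes a Threads-enabled build; the counts/offsets views are placeholders):

#include <Kokkos_Core.hpp>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    using policy_type = Kokkos::TeamPolicy<Kokkos::Threads>;
    const int n = 8;  // placeholder length
    Kokkos::View<int*, Kokkos::HostSpace> counts("counts", n);
    Kokkos::View<int*, Kokkos::HostSpace> offsets("offsets", n);
    Kokkos::deep_copy(counts, 2);  // every entry contributes 2
    Kokkos::parallel_for(
        policy_type(1, Kokkos::AUTO),
        KOKKOS_LAMBDA(const policy_type::member_type& team) {
          Kokkos::parallel_scan(
              Kokkos::TeamThreadRange(team, n),
              [&](const int i, int& partial, const bool final) {
                if (final) offsets(i) = partial;  // exclusive prefix sum
                partial += counts(i);             // contribute on both passes
              });
        });
    // offsets now holds {0, 2, 4, 6, 8, 10, 12, 14}
  }
  Kokkos::finalize();
  return 0;
}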
+/** \brief  Intra-thread vector parallel exclusive prefix sum. Executes
+ * lambda(iType i, ValueType & val, bool final) for each i=0..N-1.
+ *
+ * The range i=0..N-1 is mapped to all vector lanes in the thread and a scan
+ * operation is performed. Depending on the target execution space the operator
+ * might be called twice: once with final=false and once with final=true. When
+ * final==true val contains the prefix sum value. The contribution of this "i"
+ * needs to be added to val no matter whether final==true or not. In a serial
+ * execution (i.e. team_size==1) the operator is only called once with
+ * final==true. Scan_val will be set to the final sum value over all vector
+ * lanes.
+ */
+template <typename iType, class FunctorType>
+KOKKOS_INLINE_FUNCTION void parallel_scan(
+    const Impl::ThreadVectorRangeBoundariesStruct<
+        iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
+    const FunctorType& lambda) {
+  using value_type =
+      typename Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+                                     TeamPolicy<Threads>,
+                                     FunctorType>::value_type;
+
+  value_type scan_val = value_type();
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, scan_val, true);
+  }
+}
+
+/** \brief  Intra-thread vector parallel scan with reducer
+ *
+ */
+template <typename iType, class FunctorType, typename ReducerType>
+KOKKOS_INLINE_FUNCTION std::enable_if_t<Kokkos::is_reducer<ReducerType>::value>
+parallel_scan(const Impl::ThreadVectorRangeBoundariesStruct<
+                  iType, Impl::ThreadsExecTeamMember>& loop_boundaries,
+              const FunctorType& lambda, const ReducerType& reducer) {
+  typename ReducerType::value_type scan_val;
+  reducer.init(scan_val);
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, scan_val, true);
+  }
+}
+
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<
+        Impl::ThreadsExecTeamMember>& /*single_struct*/,
+    const FunctorType& lambda) {
+  lambda();
+}
+
+template <class FunctorType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::ThreadsExecTeamMember>& single_struct,
+    const FunctorType& lambda) {
+  if (single_struct.team_member.team_rank() == 0) lambda();
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::VectorSingleStruct<
+        Impl::ThreadsExecTeamMember>& /*single_struct*/,
+    const FunctorType& lambda, ValueType& val) {
+  lambda(val);
+}
+
+template <class FunctorType, class ValueType>
+KOKKOS_INLINE_FUNCTION void single(
+    const Impl::ThreadSingleStruct<Impl::ThreadsExecTeamMember>& single_struct,
+    const FunctorType& lambda, ValueType& val) {
+  if (single_struct.team_member.team_rank() == 0) {
+    lambda(val);
+  }
+  single_struct.team_member.team_broadcast(val, 0);
+}
+}  // namespace Kokkos
+
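A hypothetical sketch of the broadcasting overload above (assumes a Threads-enabled build; the token value is a placeholder):

#include <Kokkos_Core.hpp>
#include <cstdio>

int main(int argc, char* argv[]) {
  Kokkos::initialize(argc, argv);
  {
    using policy_type = Kokkos::TeamPolicy<Kokkos::Threads>;
    Kokkos::parallel_for(
        policy_type(1, Kokkos::AUTO),
        KOKKOS_LAMBDA(const policy_type::member_type& team) {
          int token = 0;
          // Rank 0 executes the lambda; the result is broadcast so every
          // thread in the team observes token == 42 afterwards.
          Kokkos::single(Kokkos::PerTeam(team),
                         [&](int& v) { v = 42; }, token);
          if (token != 42) std::printf("broadcast failed\n");
        });
  }
  Kokkos::finalize();
  return 0;
}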
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+#endif /* #define KOKKOS_THREADSTEAM_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_Parallel_MDRange.hpp b/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_Parallel_MDRange.hpp
new file mode 100644 (file)
index 0000000..6d1a38d
--- /dev/null
@@ -0,0 +1,322 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_THREADS_PARALLEL_MDRANGE_HPP
+#define KOKKOS_THREADS_PARALLEL_MDRANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+#include <KokkosExp_MDRangePolicy.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::MDRangePolicy<Traits...>,
+                  Kokkos::Threads> {
+ private:
+  using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+  using Policy        = typename MDRangePolicy::impl_range_policy;
+
+  using WorkTag = typename MDRangePolicy::work_tag;
+
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+
+  using iterate_type = typename Kokkos::Impl::HostIterateTile<
+      MDRangePolicy, FunctorType, typename MDRangePolicy::work_tag, void>;
+
+  const FunctorType m_functor;
+  const MDRangePolicy m_mdr_policy;
+  const Policy m_policy;  // construct as RangePolicy( 0, num_tiles
+                          // ).set_chunk_size(1) in ctor
+
+  inline static void exec_range(const MDRangePolicy &mdr_policy,
+                                const FunctorType &functor, const Member ibeg,
+                                const Member iend) {
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+    defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+    for (Member i = ibeg; i < iend; ++i) {
+      iterate_type(mdr_policy, functor)(i);
+    }
+  }
+
+  static void exec(ThreadsExec &exec, const void *arg) {
+    exec_schedule<typename Policy::schedule_type::type>(exec, arg);
+  }
+
+  template <class Schedule>
+  static std::enable_if_t<std::is_same<Schedule, Kokkos::Static>::value>
+  exec_schedule(ThreadsExec &exec, const void *arg) {
+    const ParallelFor &self = *((const ParallelFor *)arg);
+
+    WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
+
+    ParallelFor::exec_range(self.m_mdr_policy, self.m_functor, range.begin(),
+                            range.end());
+
+    exec.fan_in();
+  }
+
+  template <class Schedule>
+  static std::enable_if_t<std::is_same<Schedule, Kokkos::Dynamic>::value>
+  exec_schedule(ThreadsExec &exec, const void *arg) {
+    const ParallelFor &self = *((const ParallelFor *)arg);
+
+    WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
+
+    exec.set_work_range(range.begin(), range.end(), self.m_policy.chunk_size());
+    exec.reset_steal_target();
+    exec.barrier();
+
+    long work_index = exec.get_work_index();
+
+    while (work_index != -1) {
+      const Member begin =
+          static_cast<Member>(work_index) * self.m_policy.chunk_size();
+      const Member end =
+          begin + self.m_policy.chunk_size() < self.m_policy.end()
+              ? begin + self.m_policy.chunk_size()
+              : self.m_policy.end();
+
+      ParallelFor::exec_range(self.m_mdr_policy, self.m_functor, begin, end);
+      work_index = exec.get_work_index();
+    }
+
+    exec.fan_in();
+  }
+
+ public:
+  inline void execute() const {
+    ThreadsExec::start(&ParallelFor::exec, this);
+    ThreadsExec::fence();
+  }
+
+  ParallelFor(const FunctorType &arg_functor, const MDRangePolicy &arg_policy)
+      : m_functor(arg_functor),
+        m_mdr_policy(arg_policy),
+        m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)) {}
+
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy &, const Functor &) {
+    /**
+     * 1024 here is just our guess for a reasonable max tile size,
+     * it isn't a hardware constraint. If people see a use for larger
+     * tile size products, we're happy to change this.
+     */
+    return 1024;
+  }
+};
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::MDRangePolicy<Traits...>, ReducerType,
+                     Kokkos::Threads> {
+ private:
+  using MDRangePolicy = Kokkos::MDRangePolicy<Traits...>;
+  using Policy        = typename MDRangePolicy::impl_range_policy;
+
+  using WorkTag   = typename MDRangePolicy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                                  WorkTag, void>::type;
+
+  using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                                         MDRangePolicy, ReducerTypeFwd>;
+  using pointer_type   = typename Analysis::pointer_type;
+  using value_type     = typename Analysis::value_type;
+  using reference_type = typename Analysis::reference_type;
+
+  using iterate_type =
+      typename Kokkos::Impl::HostIterateTile<MDRangePolicy, FunctorType,
+                                             WorkTag, reference_type>;
+
+  const FunctorType m_functor;
+  const MDRangePolicy m_mdr_policy;
+  const Policy m_policy;  // construct as RangePolicy( 0, num_tiles
+                          // ).set_chunk_size(1) in ctor
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+
+  inline static void exec_range(const MDRangePolicy &mdr_policy,
+                                const FunctorType &functor, const Member &ibeg,
+                                const Member &iend, reference_type update) {
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+    defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+    for (Member i = ibeg; i < iend; ++i) {
+      iterate_type(mdr_policy, functor, update)(i);
+    }
+  }
+
+  static void exec(ThreadsExec &exec, const void *arg) {
+    exec_schedule<typename Policy::schedule_type::type>(exec, arg);
+  }
+
+  template <class Schedule>
+  static std::enable_if_t<std::is_same<Schedule, Kokkos::Static>::value>
+  exec_schedule(ThreadsExec &exec, const void *arg) {
+    const ParallelReduce &self = *((const ParallelReduce *)arg);
+    const WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
+
+    typename Analysis::Reducer reducer(
+        &ReducerConditional::select(self.m_functor, self.m_reducer));
+
+    ParallelReduce::exec_range(
+        self.m_mdr_policy, self.m_functor, range.begin(), range.end(),
+        reducer.init(static_cast<pointer_type>(exec.reduce_memory())));
+
+    exec.fan_in_reduce(reducer);
+  }
+
+  template <class Schedule>
+  static std::enable_if_t<std::is_same<Schedule, Kokkos::Dynamic>::value>
+  exec_schedule(ThreadsExec &exec, const void *arg) {
+    const ParallelReduce &self = *((const ParallelReduce *)arg);
+    const WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
+
+    exec.set_work_range(range.begin(), range.end(), self.m_policy.chunk_size());
+    exec.reset_steal_target();
+    exec.barrier();
+
+    long work_index = exec.get_work_index();
+    typename Analysis::Reducer reducer(
+        &ReducerConditional::select(self.m_functor, self.m_reducer));
+
+    reference_type update =
+        reducer.init(static_cast<pointer_type>(exec.reduce_memory()));
+    while (work_index != -1) {
+      const Member begin =
+          static_cast<Member>(work_index) * self.m_policy.chunk_size();
+      const Member end =
+          begin + self.m_policy.chunk_size() < self.m_policy.end()
+              ? begin + self.m_policy.chunk_size()
+              : self.m_policy.end();
+      ParallelReduce::exec_range(self.m_mdr_policy, self.m_functor, begin, end,
+                                 update);
+      work_index = exec.get_work_index();
+    }
+
+    exec.fan_in_reduce(reducer);
+  }
+
+ public:
+  inline void execute() const {
+    ThreadsExec::resize_scratch(
+        Analysis::value_size(ReducerConditional::select(m_functor, m_reducer)),
+        0);
+
+    ThreadsExec::start(&ParallelReduce::exec, this);
+
+    ThreadsExec::fence();
+
+    if (m_result_ptr) {
+      const pointer_type data =
+          (pointer_type)ThreadsExec::root_reduce_scratch();
+
+      const unsigned n = Analysis::value_count(
+          ReducerConditional::select(m_functor, m_reducer));
+      for (unsigned i = 0; i < n; ++i) {
+        m_result_ptr[i] = data[i];
+      }
+    }
+  }
+
+  template <class HostViewType>
+  ParallelReduce(const FunctorType &arg_functor,
+                 const MDRangePolicy &arg_policy,
+                 const HostViewType &arg_result_view,
+                 std::enable_if_t<Kokkos::is_view<HostViewType>::value &&
+                                      !Kokkos::is_reducer<ReducerType>::value,
+                                  void *> = nullptr)
+      : m_functor(arg_functor),
+        m_mdr_policy(arg_policy),
+        m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result_view.data()) {
+    static_assert(Kokkos::is_view<HostViewType>::value,
+                  "Kokkos::Threads reduce result must be a View");
+
+    static_assert(
+        std::is_same<typename HostViewType::memory_space, HostSpace>::value,
+        "Kokkos::Threads reduce result must be a View in HostSpace");
+  }
+
+  inline ParallelReduce(const FunctorType &arg_functor,
+                        MDRangePolicy arg_policy, const ReducerType &reducer)
+      : m_functor(arg_functor),
+        m_mdr_policy(arg_policy),
+        m_policy(Policy(0, m_mdr_policy.m_num_tiles).set_chunk_size(1)),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()) {
+    /*static_assert( std::is_same< typename ViewType::memory_space
+                                    , Kokkos::HostSpace >::value
+      , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
+      );*/
+  }
+
+  template <typename Policy, typename Functor>
+  static int max_tile_size_product(const Policy &, const Functor &) {
+    /**
+     * 1024 here is just our guess for a reasonable max tile size,
+     * it isn't a hardware constraint. If people see a use for larger
+     * tile size products, we're happy to change this.
+     */
+    return 1024;
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
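Both specializations above reduce the multidimensional tile space to a RangePolicy(0, num_tiles) with chunk size 1 and let HostIterateTile expand each flattened tile index. A minimal sketch of the user-facing calls that dispatch to them when Kokkos::Threads is the execution space (view names and extents are illustrative):

#include <Kokkos_Core.hpp>

void mdrange_demo() {
  using policy_2d = Kokkos::MDRangePolicy<Kokkos::Threads, Kokkos::Rank<2>>;
  Kokkos::View<double**, Kokkos::HostSpace> a("a", 64, 64);

  // Dispatches to ParallelFor<..., MDRangePolicy, Threads> above.
  Kokkos::parallel_for(
      "fill", policy_2d({0, 0}, {64, 64}),
      KOKKOS_LAMBDA(const int i, const int j) { a(i, j) = i + 0.5 * j; });

  // Dispatches to ParallelReduce<..., MDRangePolicy, Threads>; the scalar
  // result lives in HostSpace, matching the static_assert in the ctor.
  double sum = 0.0;
  Kokkos::parallel_reduce(
      "sum", policy_2d({0, 0}, {64, 64}),
      KOKKOS_LAMBDA(const int i, const int j, double& lsum) {
        lsum += a(i, j);
      },
      sum);
}
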
diff --git a/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_Parallel_Range.hpp b/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_Parallel_Range.hpp
new file mode 100644
index 0000000..971a0bb
--- /dev/null
@@ -0,0 +1,485 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_THREADS_PARALLEL_RANGE_HPP
+#define KOKKOS_THREADS_PARALLEL_RANGE_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::RangePolicy<Traits...>,
+                  Kokkos::Threads> {
+ private:
+  using Policy    = Kokkos::RangePolicy<Traits...>;
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const FunctorType &functor, const Member ibeg, const Member iend) {
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+    defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+    for (Member i = ibeg; i < iend; ++i) {
+      functor(i);
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const FunctorType &functor, const Member ibeg, const Member iend) {
+    const TagType t{};
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+    defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+    for (Member i = ibeg; i < iend; ++i) {
+      functor(t, i);
+    }
+  }
+
+  static void exec(ThreadsExec &exec, const void *arg) {
+    exec_schedule<typename Policy::schedule_type::type>(exec, arg);
+  }
+
+  template <class Schedule>
+  static std::enable_if_t<std::is_same<Schedule, Kokkos::Static>::value>
+  exec_schedule(ThreadsExec &exec, const void *arg) {
+    const ParallelFor &self = *((const ParallelFor *)arg);
+
+    WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
+
+    ParallelFor::template exec_range<WorkTag>(self.m_functor, range.begin(),
+                                              range.end());
+
+    exec.fan_in();
+  }
+
+  template <class Schedule>
+  static std::enable_if_t<std::is_same<Schedule, Kokkos::Dynamic>::value>
+  exec_schedule(ThreadsExec &exec, const void *arg) {
+    const ParallelFor &self = *((const ParallelFor *)arg);
+
+    WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
+
+    exec.set_work_range(range.begin() - self.m_policy.begin(),
+                        range.end() - self.m_policy.begin(),
+                        self.m_policy.chunk_size());
+    exec.reset_steal_target();
+    exec.barrier();
+
+    long work_index = exec.get_work_index();
+
+    while (work_index != -1) {
+      const Member begin =
+          static_cast<Member>(work_index) * self.m_policy.chunk_size() +
+          self.m_policy.begin();
+      const Member end =
+          begin + self.m_policy.chunk_size() < self.m_policy.end()
+              ? begin + self.m_policy.chunk_size()
+              : self.m_policy.end();
+      ParallelFor::template exec_range<WorkTag>(self.m_functor, begin, end);
+      work_index = exec.get_work_index();
+    }
+
+    exec.fan_in();
+  }
+
+ public:
+  inline void execute() const {
+    ThreadsExec::start(&ParallelFor::exec, this);
+    ThreadsExec::fence();
+  }
+
+  ParallelFor(const FunctorType &arg_functor, const Policy &arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+template <class FunctorType, class ReducerType, class... Traits>
+class ParallelReduce<FunctorType, Kokkos::RangePolicy<Traits...>, ReducerType,
+                     Kokkos::Threads> {
+ private:
+  using Policy = Kokkos::RangePolicy<Traits...>;
+
+  using WorkTag   = typename Policy::work_tag;
+  using WorkRange = typename Policy::WorkRange;
+  using Member    = typename Policy::member_type;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                                  WorkTag, void>::type;
+
+  using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                                         Policy, ReducerTypeFwd>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const FunctorType &functor, const Member &ibeg, const Member &iend,
+      reference_type update) {
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+    defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+    for (Member i = ibeg; i < iend; ++i) {
+      functor(i, update);
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const FunctorType &functor, const Member &ibeg, const Member &iend,
+      reference_type update) {
+    const TagType t{};
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+    defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+    for (Member i = ibeg; i < iend; ++i) {
+      functor(t, i, update);
+    }
+  }
+
+  static void exec(ThreadsExec &exec, const void *arg) {
+    exec_schedule<typename Policy::schedule_type::type>(exec, arg);
+  }
+
+  template <class Schedule>
+  static std::enable_if_t<std::is_same<Schedule, Kokkos::Static>::value>
+  exec_schedule(ThreadsExec &exec, const void *arg) {
+    const ParallelReduce &self = *((const ParallelReduce *)arg);
+    const WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
+
+    typename Analysis::Reducer reducer(
+        &ReducerConditional::select(self.m_functor, self.m_reducer));
+
+    ParallelReduce::template exec_range<WorkTag>(
+        self.m_functor, range.begin(), range.end(),
+        reducer.init(static_cast<pointer_type>(exec.reduce_memory())));
+
+    exec.fan_in_reduce(reducer);
+  }
+
+  template <class Schedule>
+  static std::enable_if_t<std::is_same<Schedule, Kokkos::Dynamic>::value>
+  exec_schedule(ThreadsExec &exec, const void *arg) {
+    const ParallelReduce &self = *((const ParallelReduce *)arg);
+    const WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
+
+    exec.set_work_range(range.begin() - self.m_policy.begin(),
+                        range.end() - self.m_policy.begin(),
+                        self.m_policy.chunk_size());
+    exec.reset_steal_target();
+    exec.barrier();
+
+    long work_index = exec.get_work_index();
+    typename Analysis::Reducer reducer(
+        &ReducerConditional::select(self.m_functor, self.m_reducer));
+
+    reference_type update =
+        reducer.init(static_cast<pointer_type>(exec.reduce_memory()));
+    while (work_index != -1) {
+      const Member begin =
+          static_cast<Member>(work_index) * self.m_policy.chunk_size() +
+          self.m_policy.begin();
+      const Member end =
+          begin + self.m_policy.chunk_size() < self.m_policy.end()
+              ? begin + self.m_policy.chunk_size()
+              : self.m_policy.end();
+      ParallelReduce::template exec_range<WorkTag>(self.m_functor, begin, end,
+                                                   update);
+      work_index = exec.get_work_index();
+    }
+
+    exec.fan_in_reduce(reducer);
+  }
+
+ public:
+  inline void execute() const {
+    if (m_policy.end() <= m_policy.begin()) {
+      if (m_result_ptr) {
+        typename Analysis::Reducer final_reducer(
+            &ReducerConditional::select(m_functor, m_reducer));
+        final_reducer.init(m_result_ptr);
+        final_reducer.final(m_result_ptr);
+      }
+    } else {
+      ThreadsExec::resize_scratch(
+          Analysis::value_size(
+              ReducerConditional::select(m_functor, m_reducer)),
+          0);
+
+      ThreadsExec::start(&ParallelReduce::exec, this);
+
+      ThreadsExec::fence();
+
+      if (m_result_ptr) {
+        const pointer_type data =
+            (pointer_type)ThreadsExec::root_reduce_scratch();
+
+        const unsigned n = Analysis::value_count(
+            ReducerConditional::select(m_functor, m_reducer));
+        for (unsigned i = 0; i < n; ++i) {
+          m_result_ptr[i] = data[i];
+        }
+      }
+    }
+  }
+
+  template <class HostViewType>
+  ParallelReduce(const FunctorType &arg_functor, const Policy &arg_policy,
+                 const HostViewType &arg_result_view,
+                 std::enable_if_t<Kokkos::is_view<HostViewType>::value &&
+                                      !Kokkos::is_reducer<ReducerType>::value,
+                                  void *> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result_view.data()) {
+    static_assert(Kokkos::is_view<HostViewType>::value,
+                  "Kokkos::Threads reduce result must be a View");
+
+    static_assert(
+        std::is_same<typename HostViewType::memory_space, HostSpace>::value,
+        "Kokkos::Threads reduce result must be a View in HostSpace");
+  }
+
+  inline ParallelReduce(const FunctorType &arg_functor, Policy arg_policy,
+                        const ReducerType &reducer)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()) {
+    /*static_assert( std::is_same< typename ViewType::memory_space
+                                    , Kokkos::HostSpace >::value
+      , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
+      );*/
+  }
+};
+
+template <class FunctorType, class... Traits>
+class ParallelScan<FunctorType, Kokkos::RangePolicy<Traits...>,
+                   Kokkos::Threads> {
+ private:
+  using Policy    = Kokkos::RangePolicy<Traits...>;
+  using WorkRange = typename Policy::WorkRange;
+  using WorkTag   = typename Policy::work_tag;
+  using Member    = typename Policy::member_type;
+  using Analysis  = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+                                         Policy, FunctorType>;
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const FunctorType &functor, const Member &ibeg, const Member &iend,
+      reference_type update, const bool final) {
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+    defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+    for (Member i = ibeg; i < iend; ++i) {
+      functor(i, update, final);
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const FunctorType &functor, const Member &ibeg, const Member &iend,
+      reference_type update, const bool final) {
+    const TagType t{};
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+    defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+    for (Member i = ibeg; i < iend; ++i) {
+      functor(t, i, update, final);
+    }
+  }
+
+  static void exec(ThreadsExec &exec, const void *arg) {
+    const ParallelScan &self = *((const ParallelScan *)arg);
+
+    const WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
+
+    typename Analysis::Reducer final_reducer(&self.m_functor);
+
+    reference_type update =
+        final_reducer.init(static_cast<pointer_type>(exec.reduce_memory()));
+
+    ParallelScan::template exec_range<WorkTag>(self.m_functor, range.begin(),
+                                               range.end(), update, false);
+
+    //  exec.template scan_large( final_reducer );
+    exec.scan_small(final_reducer);
+
+    ParallelScan::template exec_range<WorkTag>(self.m_functor, range.begin(),
+                                               range.end(), update, true);
+
+    exec.fan_in();
+  }
+
+ public:
+  inline void execute() const {
+    ThreadsExec::resize_scratch(2 * Analysis::value_size(m_functor), 0);
+    ThreadsExec::start(&ParallelScan::exec, this);
+    ThreadsExec::fence();
+  }
+
+  ParallelScan(const FunctorType &arg_functor, const Policy &arg_policy)
+      : m_functor(arg_functor), m_policy(arg_policy) {}
+};
+
+template <class FunctorType, class ReturnType, class... Traits>
+class ParallelScanWithTotal<FunctorType, Kokkos::RangePolicy<Traits...>,
+                            ReturnType, Kokkos::Threads> {
+ private:
+  using Policy    = Kokkos::RangePolicy<Traits...>;
+  using WorkRange = typename Policy::WorkRange;
+  using WorkTag   = typename Policy::work_tag;
+  using Member    = typename Policy::member_type;
+
+  using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::SCAN,
+                                         Policy, FunctorType>;
+
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  ReturnType &m_returnvalue;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value> exec_range(
+      const FunctorType &functor, const Member &ibeg, const Member &iend,
+      reference_type update, const bool final) {
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+    defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+    for (Member i = ibeg; i < iend; ++i) {
+      functor(i, update, final);
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value> exec_range(
+      const FunctorType &functor, const Member &ibeg, const Member &iend,
+      reference_type update, const bool final) {
+    const TagType t{};
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+    defined(KOKKOS_ENABLE_PRAGMA_IVDEP)
+#pragma ivdep
+#endif
+    for (Member i = ibeg; i < iend; ++i) {
+      functor(t, i, update, final);
+    }
+  }
+
+  static void exec(ThreadsExec &exec, const void *arg) {
+    const ParallelScanWithTotal &self = *((const ParallelScanWithTotal *)arg);
+
+    const WorkRange range(self.m_policy, exec.pool_rank(), exec.pool_size());
+
+    typename Analysis::Reducer final_reducer(&self.m_functor);
+
+    reference_type update =
+        final_reducer.init(static_cast<pointer_type>(exec.reduce_memory()));
+
+    ParallelScanWithTotal::template exec_range<WorkTag>(
+        self.m_functor, range.begin(), range.end(), update, false);
+
+    //  exec.template scan_large(final_reducer);
+    exec.scan_small(final_reducer);
+
+    ParallelScanWithTotal::template exec_range<WorkTag>(
+        self.m_functor, range.begin(), range.end(), update, true);
+
+    exec.fan_in();
+
+    if (exec.pool_rank() == exec.pool_size() - 1) {
+      self.m_returnvalue = update;
+    }
+  }
+
+ public:
+  inline void execute() const {
+    ThreadsExec::resize_scratch(2 * Analysis::value_size(m_functor), 0);
+    ThreadsExec::start(&ParallelScanWithTotal::exec, this);
+    ThreadsExec::fence();
+  }
+
+  ParallelScanWithTotal(const FunctorType &arg_functor,
+                        const Policy &arg_policy, ReturnType &arg_returnvalue)
+      : m_functor(arg_functor),
+        m_policy(arg_policy),
+        m_returnvalue(arg_returnvalue) {}
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
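These Range specializations cover parallel_for, parallel_reduce, and both scan flavors; ParallelScanWithTotal additionally copies the last pool rank's final running value into the caller's result. A minimal sketch of the corresponding user-facing calls (names and sizes are illustrative, not part of the bundled sources):

#include <Kokkos_Core.hpp>

void range_demo(const int n) {
  using range = Kokkos::RangePolicy<Kokkos::Threads>;
  Kokkos::View<double*, Kokkos::HostSpace> in("in", n), out("out", n);

  Kokkos::parallel_for(
      "init", range(0, n), KOKKOS_LAMBDA(const int i) { in(i) = 1.0; });

  // Inclusive prefix sum. Passing `total` dispatches to
  // ParallelScanWithTotal, which assigns the final update value on the
  // last pool rank (see exec() above).
  double total = 0.0;
  Kokkos::parallel_scan(
      "prefix", range(0, n),
      KOKKOS_LAMBDA(const int i, double& update, const bool final) {
        update += in(i);
        if (final) out(i) = update;
      },
      total);
}
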
diff --git a/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_Parallel_Team.hpp b/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_Parallel_Team.hpp
new file mode 100644
index 0000000..bdda110
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_THREADS_PARALLEL_TEAM_HPP
+#define KOKKOS_THREADS_PARALLEL_TEAM_HPP
+
+#include <Kokkos_Parallel.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Properties>
+class ParallelFor<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                  Kokkos::Threads> {
+ private:
+  using Policy =
+      Kokkos::Impl::TeamPolicyInternal<Kokkos::Threads, Properties...>;
+  using WorkTag = typename Policy::work_tag;
+  using Member  = typename Policy::member_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const size_t m_shared;
+
+  template <class TagType, class Schedule>
+  inline static std::enable_if_t<std::is_void<TagType>::value &&
+                                 std::is_same<Schedule, Kokkos::Static>::value>
+  exec_team(const FunctorType &functor, Member member) {
+    for (; member.valid_static(); member.next_static()) {
+      functor(member);
+    }
+  }
+
+  template <class TagType, class Schedule>
+  inline static std::enable_if_t<!std::is_void<TagType>::value &&
+                                 std::is_same<Schedule, Kokkos::Static>::value>
+  exec_team(const FunctorType &functor, Member member) {
+    const TagType t{};
+    for (; member.valid_static(); member.next_static()) {
+      functor(t, member);
+    }
+  }
+
+  template <class TagType, class Schedule>
+  inline static std::enable_if_t<std::is_void<TagType>::value &&
+                                 std::is_same<Schedule, Kokkos::Dynamic>::value>
+  exec_team(const FunctorType &functor, Member member) {
+    for (; member.valid_dynamic(); member.next_dynamic()) {
+      functor(member);
+    }
+  }
+
+  template <class TagType, class Schedule>
+  inline static std::enable_if_t<!std::is_void<TagType>::value &&
+                                 std::is_same<Schedule, Kokkos::Dynamic>::value>
+  exec_team(const FunctorType &functor, Member member) {
+    const TagType t{};
+    for (; member.valid_dynamic(); member.next_dynamic()) {
+      functor(t, member);
+    }
+  }
+
+  static void exec(ThreadsExec &exec, const void *arg) {
+    const ParallelFor &self = *((const ParallelFor *)arg);
+
+    ParallelFor::exec_team<WorkTag, typename Policy::schedule_type::type>(
+        self.m_functor, Member(&exec, self.m_policy, self.m_shared));
+
+    exec.barrier();
+    exec.fan_in();
+  }
+  template <typename Policy>
+  Policy fix_policy(Policy policy) {
+    if (policy.impl_vector_length() < 0) {
+      policy.impl_set_vector_length(1);
+    }
+    if (policy.team_size() < 0) {
+      policy.impl_set_team_size(
+          policy.team_size_recommended(m_functor, ParallelForTag{}));
+    }
+    return policy;
+  }
+
+ public:
+  inline void execute() const {
+    ThreadsExec::resize_scratch(
+        0, Policy::member_type::team_reduce_size() + m_shared);
+
+    ThreadsExec::start(&ParallelFor::exec, this);
+
+    ThreadsExec::fence();
+  }
+
+  ParallelFor(const FunctorType &arg_functor, const Policy &arg_policy)
+      : m_functor(arg_functor),
+        m_policy(fix_policy(arg_policy)),
+        m_shared(m_policy.scratch_size(0) + m_policy.scratch_size(1) +
+                 FunctorTeamShmemSize<FunctorType>::value(
+                     arg_functor, m_policy.team_size())) {}
+};
+
+template <class FunctorType, class ReducerType, class... Properties>
+class ParallelReduce<FunctorType, Kokkos::TeamPolicy<Properties...>,
+                     ReducerType, Kokkos::Threads> {
+ private:
+  using Policy =
+      Kokkos::Impl::TeamPolicyInternal<Kokkos::Threads, Properties...>;
+  using WorkTag = typename Policy::work_tag;
+  using Member  = typename Policy::member_type;
+
+  using ReducerConditional =
+      Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                         FunctorType, ReducerType>;
+  using ReducerTypeFwd = typename ReducerConditional::type;
+  using WorkTagFwd =
+      typename Kokkos::Impl::if_c<std::is_same<InvalidType, ReducerType>::value,
+                                  WorkTag, void>::type;
+
+  using Analysis = Impl::FunctorAnalysis<Impl::FunctorPatternInterface::REDUCE,
+                                         Policy, ReducerTypeFwd>;
+  using pointer_type   = typename Analysis::pointer_type;
+  using reference_type = typename Analysis::reference_type;
+
+  const FunctorType m_functor;
+  const Policy m_policy;
+  const ReducerType m_reducer;
+  const pointer_type m_result_ptr;
+  const size_t m_shared;
+
+  template <class TagType>
+  inline static std::enable_if_t<std::is_void<TagType>::value> exec_team(
+      const FunctorType &functor, Member member, reference_type update) {
+    for (; member.valid_static(); member.next_static()) {
+      functor(member, update);
+    }
+  }
+
+  template <class TagType>
+  inline static std::enable_if_t<!std::is_void<TagType>::value> exec_team(
+      const FunctorType &functor, Member member, reference_type update) {
+    const TagType t{};
+    for (; member.valid_static(); member.next_static()) {
+      functor(t, member, update);
+    }
+  }
+
+  static void exec(ThreadsExec &exec, const void *arg) {
+    const ParallelReduce &self = *((const ParallelReduce *)arg);
+
+    typename Analysis::Reducer reducer(
+        &ReducerConditional::select(self.m_functor, self.m_reducer));
+
+    ParallelReduce::template exec_team<WorkTag>(
+        self.m_functor, Member(&exec, self.m_policy, self.m_shared),
+        reducer.init(static_cast<pointer_type>(exec.reduce_memory())));
+
+    exec.fan_in_reduce(reducer);
+  }
+
+ public:
+  inline void execute() const {
+    if (m_policy.league_size() * m_policy.team_size() == 0) {
+      if (m_result_ptr) {
+        typename Analysis::Reducer final_reducer(
+            &ReducerConditional::select(m_functor, m_reducer));
+        final_reducer.init(m_result_ptr);
+        final_reducer.final(m_result_ptr);
+      }
+    } else {
+      ThreadsExec::resize_scratch(
+          Analysis::value_size(
+              ReducerConditional::select(m_functor, m_reducer)),
+          Policy::member_type::team_reduce_size() + m_shared);
+
+      ThreadsExec::start(&ParallelReduce::exec, this);
+
+      ThreadsExec::fence();
+
+      if (m_result_ptr) {
+        const pointer_type data =
+            (pointer_type)ThreadsExec::root_reduce_scratch();
+
+        const unsigned n = Analysis::value_count(
+            ReducerConditional::select(m_functor, m_reducer));
+        for (unsigned i = 0; i < n; ++i) {
+          m_result_ptr[i] = data[i];
+        }
+      }
+    }
+  }
+
+  template <typename Policy>
+  Policy fix_policy(Policy policy) {
+    if (policy.impl_vector_length() < 0) {
+      policy.impl_set_vector_length(1);
+    }
+    if (policy.team_size() < 0) {
+      policy.impl_set_team_size(policy.team_size_recommended(
+          m_functor, m_reducer, ParallelReduceTag{}));
+    }
+    return policy;
+  }
+
+  template <class ViewType>
+  inline ParallelReduce(
+      const FunctorType &arg_functor, const Policy &arg_policy,
+      const ViewType &arg_result,
+      std::enable_if_t<Kokkos::is_view<ViewType>::value &&
+                           !Kokkos::is_reducer<ReducerType>::value,
+                       void *> = nullptr)
+      : m_functor(arg_functor),
+        m_policy(fix_policy(arg_policy)),
+        m_reducer(InvalidType()),
+        m_result_ptr(arg_result.data()),
+        m_shared(m_policy.scratch_size(0) + m_policy.scratch_size(1) +
+                 FunctorTeamShmemSize<FunctorType>::value(
+                     arg_functor, m_policy.team_size())) {}
+
+  inline ParallelReduce(const FunctorType &arg_functor, Policy arg_policy,
+                        const ReducerType &reducer)
+      : m_functor(arg_functor),
+        m_policy(fix_policy(arg_policy)),
+        m_reducer(reducer),
+        m_result_ptr(reducer.view().data()),
+        m_shared(m_policy.scratch_size(0) + m_policy.scratch_size(1) +
+                 FunctorTeamShmemSize<FunctorType>::value(
+                     arg_functor, m_policy.team_size())) {
+    /*static_assert( std::is_same< typename ViewType::memory_space
+                            , Kokkos::HostSpace >::value
+    , "Reduction result on Kokkos::OpenMP must be a Kokkos::View in HostSpace"
+    );*/
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
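fix_policy() above substitutes team_size_recommended() whenever the user left the team size to Kokkos::AUTO (reported as a negative team_size()), so a launch never reaches the kernel with an unset team size. A minimal sketch of a team-level reduction that dispatches to the specialization above (names are illustrative):

#include <Kokkos_Core.hpp>

void team_demo(const int league_size) {
  using policy_type = Kokkos::TeamPolicy<Kokkos::Threads>;
  using member_type = policy_type::member_type;

  // Kokkos::AUTO leaves team_size() < 0, so fix_policy() fills in
  // team_size_recommended() before ThreadsExec::start() runs.
  double result = 0.0;
  Kokkos::parallel_reduce(
      "team_reduce", policy_type(league_size, Kokkos::AUTO),
      KOKKOS_LAMBDA(const member_type& team, double& update) {
        update += team.league_rank();
      },
      result);
}
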
diff --git a/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_UniqueToken.hpp b/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_UniqueToken.hpp
new file mode 100644
index 0000000..f990119
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_THREADS_UNIQUETOKEN_HPP
+#define KOKKOS_THREADS_UNIQUETOKEN_HPP
+
+#include <Kokkos_UniqueToken.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+template <>
+class UniqueToken<Threads, UniqueTokenScope::Instance> {
+ private:
+  using buffer_type = Kokkos::View<uint32_t *, Kokkos::HostSpace>;
+  int m_count;
+  buffer_type m_buffer_view;
+  uint32_t volatile *m_buffer;
+
+ public:
+  using execution_space = Threads;
+  using size_type       = int;
+
+  /// \brief create an object sized for the concurrency of the given instance
+  ///
+  /// This object should not be shared between instances
+  UniqueToken(execution_space const & = execution_space()) noexcept
+      : m_count(::Kokkos::Threads::impl_thread_pool_size()),
+        m_buffer_view(buffer_type()),
+        m_buffer(nullptr) {}
+
+  UniqueToken(size_type max_size, execution_space const & = execution_space())
+      : m_count(max_size > ::Kokkos::Threads::impl_thread_pool_size()
+                    ? ::Kokkos::Threads::impl_thread_pool_size()
+                    : max_size),
+        m_buffer_view(
+            max_size > ::Kokkos::Threads::impl_thread_pool_size()
+                ? buffer_type()
+                : buffer_type("UniqueToken::m_buffer_view",
+                              ::Kokkos::Impl::concurrent_bitset::buffer_bound(
+                                  m_count))),
+        m_buffer(m_buffer_view.data()) {}
+
+  /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int size() const noexcept { return m_count; }
+
+  /// \brief acquire value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int acquire() const noexcept {
+    KOKKOS_IF_ON_HOST((
+        if (m_buffer == nullptr) {
+          return Threads::impl_thread_pool_rank();
+        } else {
+          const ::Kokkos::pair<int, int> result =
+              ::Kokkos::Impl::concurrent_bitset::acquire_bounded(
+                  m_buffer, m_count, ::Kokkos::Impl::clock_tic() % m_count);
+
+          if (result.first < 0) {
+            ::Kokkos::abort(
+                "UniqueToken<Threads> failure to acquire tokens, no tokens "
+                "available");
+          }
+          return result.first;
+        }))
+
+    KOKKOS_IF_ON_DEVICE((return 0;))
+  }
+
+  /// \brief release a value acquired by acquire()
+  KOKKOS_INLINE_FUNCTION
+  void release(int i) const noexcept {
+    KOKKOS_IF_ON_HOST((if (m_buffer != nullptr) {
+      ::Kokkos::Impl::concurrent_bitset::release(m_buffer, i);
+    }))
+
+    KOKKOS_IF_ON_DEVICE(((void)i;))
+  }
+};
+
+template <>
+class UniqueToken<Threads, UniqueTokenScope::Global> {
+ public:
+  using execution_space = Threads;
+  using size_type       = int;
+
+  /// \brief create an object sized for the concurrency of the given instance
+  ///
+  /// This object should not be shared between instances
+  UniqueToken(execution_space const & = execution_space()) noexcept {}
+
+  /// \brief upper bound for acquired values, i.e. 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int size() const noexcept {
+    KOKKOS_IF_ON_HOST((return Threads::impl_thread_pool_size();))
+
+    KOKKOS_IF_ON_DEVICE((return 0;))
+  }
+
+  /// \brief acquire value such that 0 <= value < size()
+  KOKKOS_INLINE_FUNCTION
+  int acquire() const noexcept {
+    KOKKOS_IF_ON_HOST((return Threads::impl_thread_pool_rank();))
+
+    KOKKOS_IF_ON_DEVICE((return 0;))
+  }
+
+  /// \brief release a value acquired by acquire()
+  KOKKOS_INLINE_FUNCTION
+  void release(int) const noexcept {}
+};
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
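The instance-scoped UniqueToken above hands out ids below the thread-pool size, either the calling thread's pool rank directly or a slot from a concurrent bitset when a smaller max_size was requested. A minimal sketch of the acquire/release pattern (the `slots` view is an illustrative helper, not part of the bundled sources):

#include <Kokkos_Core.hpp>

void token_demo(const int n) {
  Kokkos::Experimental::UniqueToken<Kokkos::Threads> token;
  // One exclusive slot per concurrently running thread.
  Kokkos::View<int*, Kokkos::HostSpace> slots("slots", token.size());

  Kokkos::parallel_for(
      "token_demo", Kokkos::RangePolicy<Kokkos::Threads>(0, n),
      KOKKOS_LAMBDA(const int) {
        const int id = token.acquire();  // 0 <= id < token.size()
        slots(id) += 1;                  // exclusive access to slot id
        token.release(id);
      });
}
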
diff --git a/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_WorkGraphPolicy.hpp b/bundled/kokkos-3.7.00/core/src/Threads/Kokkos_Threads_WorkGraphPolicy.hpp
new file mode 100644
index 0000000..5e8ac46
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_THREADS_WORKGRAPHPOLICY_HPP
+#define KOKKOS_THREADS_WORKGRAPHPOLICY_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Threads.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class FunctorType, class... Traits>
+class ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
+                  Kokkos::Threads> {
+ private:
+  using Policy = Kokkos::WorkGraphPolicy<Traits...>;
+
+  using Self = ParallelFor<FunctorType, Kokkos::WorkGraphPolicy<Traits...>,
+                           Kokkos::Threads>;
+
+  Policy m_policy;
+  FunctorType m_functor;
+
+  template <class TagType>
+  std::enable_if_t<std::is_void<TagType>::value> exec_one(
+      const std::int32_t w) const noexcept {
+    m_functor(w);
+  }
+
+  template <class TagType>
+  std::enable_if_t<!std::is_void<TagType>::value> exec_one(
+      const std::int32_t w) const noexcept {
+    const TagType t{};
+    m_functor(t, w);
+  }
+
+  inline void exec_one_thread() const noexcept {
+    // Spin until COMPLETED_TOKEN.
+    // END_TOKEN indicates no work is currently available.
+
+    for (std::int32_t w = Policy::END_TOKEN;
+         Policy::COMPLETED_TOKEN != (w = m_policy.pop_work());) {
+      if (Policy::END_TOKEN != w) {
+        exec_one<typename Policy::work_tag>(w);
+        m_policy.completed_work(w);
+      }
+    }
+  }
+
+  static inline void thread_main(ThreadsExec& exec, const void* arg) noexcept {
+    const Self& self = *(static_cast<const Self*>(arg));
+    self.exec_one_thread();
+    exec.fan_in();
+  }
+
+ public:
+  inline void execute() {
+    ThreadsExec::start(&Self::thread_main, this);
+    ThreadsExec::fence();
+  }
+
+  inline ParallelFor(const FunctorType& arg_functor, const Policy& arg_policy)
+      : m_policy(arg_policy), m_functor(arg_functor) {}
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif /* KOKKOS_THREADS_WORKGRAPHPOLICY_HPP */
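exec_one_thread() above spins on pop_work(): END_TOKEN means no work item is ready yet, COMPLETED_TOKEN ends the loop, and completed_work(w) releases the items that depend on w. A hedged sketch of the entry point; the template-parameter order and the graph_type spelling are assumptions about the WorkGraphPolicy interface, which is not part of this file:

#include <Kokkos_Core.hpp>
#include <cstdint>

void workgraph_demo() {
  using exec_space  = Kokkos::Threads;
  using policy_type = Kokkos::WorkGraphPolicy<std::int32_t, exec_space>;

  // build_dag() is a hypothetical helper returning the policy's Crs
  // dependency graph (work item w becomes runnable once all of its
  // predecessors have completed).
  policy_type::graph_type graph = build_dag();
  Kokkos::parallel_for(
      "dag", policy_type(graph), KOKKOS_LAMBDA(const std::int32_t w) {
        // process work item w; the runtime calls completed_work(w)
        // after this functor returns
      });
}
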
diff --git a/bundled/kokkos-3.7.00/core/src/View/Hooks/Kokkos_ViewHooks.hpp b/bundled/kokkos-3.7.00/core/src/View/Hooks/Kokkos_ViewHooks.hpp
new file mode 100644
index 0000000..77b2730
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_EXPERIMENTAL_VIEWHOOKS_HPP
+#define KOKKOS_EXPERIMENTAL_VIEWHOOKS_HPP
+
+namespace Kokkos {
+namespace Experimental {
+
+namespace Impl {
+template <typename View>
+using copy_subscription_function_type = void (*)(View &, const View &);
+
+template <template <typename> class Invoker, typename... Subscribers>
+struct invoke_subscriber_impl;
+
+template <template <typename> class Invoker>
+struct invoke_subscriber_impl<Invoker> {
+  template <typename ViewType>
+  static void invoke(ViewType &, const ViewType &) {}
+};
+
+template <template <typename> class Invoker, typename Subscriber,
+          typename... RemSubscribers>
+struct invoke_subscriber_impl<Invoker, Subscriber, RemSubscribers...> {
+  template <typename ViewType>
+  static void invoke(ViewType &self, const ViewType &other) {
+    Invoker<Subscriber>::call(self, other);
+    invoke_subscriber_impl<Invoker, RemSubscribers...>::invoke(self, other);
+  }
+};
+
+template <typename Subscriber>
+struct copy_constructor_invoker {
+  template <typename View>
+  static void call(View &self, const View &other) {
+    Subscriber::copy_constructed(self, other);
+  }
+};
+
+template <typename Subscriber>
+struct move_constructor_invoker {
+  template <typename View>
+  static void call(View &self, const View &other) {
+    Subscriber::move_constructed(self, other);
+  }
+};
+
+template <typename Subscriber>
+struct copy_assignment_operator_invoker {
+  template <typename View>
+  static void call(View &self, const View &other) {
+    Subscriber::copy_assigned(self, other);
+  }
+};
+
+template <typename Subscriber>
+struct move_assignment_operator_invoker {
+  template <typename View>
+  static void call(View &self, const View &other) {
+    Subscriber::move_assigned(self, other);
+  }
+};
+}  // namespace Impl
+
+struct EmptyViewHooks {
+  using hooks_policy = EmptyViewHooks;
+
+  template <typename View>
+  static void copy_construct(View &, const View &) {}
+  template <typename View>
+  static void copy_assign(View &, const View &) {}
+  template <typename View>
+  static void move_construct(View &, const View &) {}
+  template <typename View>
+  static void move_assign(View &, const View &) {}
+};
+
+template <class... Subscribers>
+struct SubscribableViewHooks {
+  using hooks_policy = SubscribableViewHooks<Subscribers...>;
+
+  template <typename View>
+  static void copy_construct(View &self, const View &other) {
+    Impl::invoke_subscriber_impl<Impl::copy_constructor_invoker,
+                                 Subscribers...>::invoke(self, other);
+  }
+  template <typename View>
+  static void copy_assign(View &self, const View &other) {
+    Impl::invoke_subscriber_impl<Impl::copy_assignment_operator_invoker,
+                                 Subscribers...>::invoke(self, other);
+  }
+  template <typename View>
+  static void move_construct(View &self, const View &other) {
+    Impl::invoke_subscriber_impl<Impl::move_constructor_invoker,
+                                 Subscribers...>::invoke(self, other);
+  }
+  template <typename View>
+  static void move_assign(View &self, const View &other) {
+    Impl::invoke_subscriber_impl<Impl::move_assignment_operator_invoker,
+                                 Subscribers...>::invoke(self, other);
+  }
+};
+
+using DefaultViewHooks = EmptyViewHooks;
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif  // KOKKOS_EXPERIMENTAL_VIEWHOOKS_HPP
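invoke_subscriber_impl above fans each View copy/move event out to every subscriber's static callback, so a subscriber only has to provide the four static member templates named by the invokers. A hedged sketch of such a subscriber (the callback names follow directly from the invokers above; how the hooks policy is attached to a View is an assumption about the experimental interface):

#include <Kokkos_Core.hpp>

struct CountingSubscriber {
  template <typename View>
  static void copy_constructed(View& /*self*/, const View& /*other*/) {
    // e.g. bump a counter, register the allocation, ...
  }
  template <typename View>
  static void move_constructed(View&, const View&) {}
  template <typename View>
  static void copy_assigned(View&, const View&) {}
  template <typename View>
  static void move_assigned(View&, const View&) {}
};

using Hooks = Kokkos::Experimental::SubscribableViewHooks<CountingSubscriber>;
// Assumed attachment point: passing Hooks as a View template argument,
// e.g. Kokkos::View<double*, Hooks> v("v", 100);
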
diff --git a/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_CUDA.hpp b/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_CUDA.hpp
new file mode 100644
index 0000000..aedb8d0
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_DECLARE_CUDA_HPP
+#define KOKKOS_DECLARE_CUDA_HPP
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#include <Kokkos_Cuda.hpp>
+#include <Cuda/Kokkos_Cuda_Half_Impl_Type.hpp>
+#include <Cuda/Kokkos_Cuda_Half_Conversion.hpp>
+#include <Cuda/Kokkos_Cuda_Parallel_MDRange.hpp>
+#include <Cuda/Kokkos_Cuda_Parallel_Range.hpp>
+#include <Cuda/Kokkos_Cuda_Parallel_Team.hpp>
+#include <Cuda/Kokkos_Cuda_KernelLaunch.hpp>
+#include <Cuda/Kokkos_Cuda_Instance.hpp>
+#include <Cuda/Kokkos_Cuda_View.hpp>
+#include <Cuda/Kokkos_Cuda_Team.hpp>
+#include <Cuda/Kokkos_Cuda_Task.hpp>
+#include <Cuda/Kokkos_Cuda_MDRangePolicy.hpp>
+#include <Cuda/Kokkos_Cuda_UniqueToken.hpp>
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_HBWSpace.hpp b/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_HBWSpace.hpp
new file mode 100644 (file)
index 0000000..ff5133e
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_DECLARE_HBWSPACE_HPP
+#define KOKKOS_DECLARE_HBWSPACE_HPP
+
+#ifdef KOKKOS_ENABLE_HBWSPACE
+#include <Kokkos_HBWSpace.hpp>
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_HIP.hpp b/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_HIP.hpp
new file mode 100644 (file)
index 0000000..b3bf14d
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_DECLARE_HIP_HPP
+#define KOKKOS_DECLARE_HIP_HPP
+
+#if defined(KOKKOS_ENABLE_HIP)
+#include <Kokkos_HIP.hpp>
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_HPX.hpp b/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_HPX.hpp
new file mode 100644 (file)
index 0000000..2cbecc9
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_DECLARE_HPX_HPP
+#define KOKKOS_DECLARE_HPX_HPP
+
+#if defined(KOKKOS_ENABLE_HPX)
+#include <Kokkos_HPX.hpp>
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_OPENACC.hpp b/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_OPENACC.hpp
new file mode 100644 (file)
index 0000000..5c09b7a
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_DECLARE_OPENACC_HPP
+#define KOKKOS_DECLARE_OPENACC_HPP
+
+#if defined(KOKKOS_ENABLE_OPENACC)
+#include <OpenACC/Kokkos_OpenACC.hpp>
+#include <OpenACC/Kokkos_OpenACCSpace.hpp>
+#include <OpenACC/Kokkos_OpenACC_Traits.hpp>
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_OPENMP.hpp b/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_OPENMP.hpp
new file mode 100644 (file)
index 0000000..069dd5c
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_DECLARE_OPENMP_HPP
+#define KOKKOS_DECLARE_OPENMP_HPP
+
+#if defined(KOKKOS_ENABLE_OPENMP)
+#include <Kokkos_OpenMP.hpp>
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_OPENMPTARGET.hpp b/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_OPENMPTARGET.hpp
new file mode 100644 (file)
index 0000000..b193d1e
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_DECLARE_OPENMPTARGET_HPP
+#define KOKKOS_DECLARE_OPENMPTARGET_HPP
+
+#if defined(KOKKOS_ENABLE_OPENMPTARGET)
+#include <Kokkos_OpenMPTarget.hpp>
+#include <Kokkos_OpenMPTargetSpace.hpp>
+#include <OpenMPTarget/Kokkos_OpenMPTarget_UniqueToken.hpp>
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_SERIAL.hpp b/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_SERIAL.hpp
new file mode 100644 (file)
index 0000000..45661b5
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_DECLARE_SERIAL_HPP
+#define KOKKOS_DECLARE_SERIAL_HPP
+
+#if defined(KOKKOS_ENABLE_SERIAL)
+#include <Kokkos_Serial.hpp>
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_SYCL.hpp b/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_SYCL.hpp
new file mode 100644 (file)
index 0000000..72a26b2
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_DECLARE_SYCL_HPP
+#define KOKKOS_DECLARE_SYCL_HPP
+
+#if defined(KOKKOS_ENABLE_SYCL)
+#include <Kokkos_SYCL.hpp>
+#include <SYCL/Kokkos_SYCL_Half_Impl_Type.hpp>
+#include <SYCL/Kokkos_SYCL_Half_Conversion.hpp>
+#include <SYCL/Kokkos_SYCL_DeepCopy.hpp>
+#include <SYCL/Kokkos_SYCL_MDRangePolicy.hpp>
+#include <SYCL/Kokkos_SYCL_Parallel_Range.hpp>
+#include <SYCL/Kokkos_SYCL_Parallel_Reduce.hpp>
+#include <SYCL/Kokkos_SYCL_Parallel_Scan.hpp>
+#include <SYCL/Kokkos_SYCL_Parallel_Team.hpp>
+#include <SYCL/Kokkos_SYCL_UniqueToken.hpp>
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_THREADS.hpp b/bundled/kokkos-3.7.00/core/src/decl/Kokkos_Declare_THREADS.hpp
new file mode 100644 (file)
index 0000000..adb8f12
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_DECLARE_THREADS_HPP
+#define KOKKOS_DECLARE_THREADS_HPP
+
+#if defined(KOKKOS_ENABLE_THREADS)
+#include <Kokkos_Threads.hpp>
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/dummy.cpp b/bundled/kokkos-3.7.00/core/src/dummy.cpp
new file mode 100644 (file)
index 0000000..4f5f14e
--- /dev/null
@@ -0,0 +1,10 @@
+
+
+namespace Kokkos {
+namespace AvoidCompilerWarnings {
+int dontComplain() {
+  // keep the compiler from complaining about emptiness
+  return 0;
+}
+}  // namespace AvoidCompilerWarnings
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_CUDA.hpp b/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_CUDA.hpp
new file mode 100644 (file)
index 0000000..4bda5e9
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_FWD_HPP_
+#define KOKKOS_CUDA_FWD_HPP_
+#if defined(KOKKOS_ENABLE_CUDA)
+namespace Kokkos {
+
+class CudaSpace;            ///< Memory space on Cuda GPU
+class CudaUVMSpace;         ///< Memory space on Cuda GPU with UVM
+class CudaHostPinnedSpace;  ///< Memory space on Host accessible to Cuda GPU
+class Cuda;                 ///< Execution space for Cuda GPU
+
+namespace Impl {
+
+template <class ExecSpace>
+void cuda_prefetch_pointer(const ExecSpace& /*space*/, const void* /*ptr*/,
+                           size_t /*bytes*/, bool /*to_device*/) {}
+
+void cuda_prefetch_pointer(const Cuda& space, const void* ptr, size_t bytes,
+                           bool to_device);
+
+}  // namespace Impl
+}  // namespace Kokkos
+#endif
+#endif
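Note the overload pair at the end of this forward-declaration header: the function template is a no-op fallback for any execution space, while the non-template Cuda overload (defined elsewhere) wins overload resolution when a Cuda instance is passed, so callers can issue prefetch hints unconditionally. A stand-alone sketch of that dispatch pattern, with illustrative names (Generic, Gpu, prefetch are not Kokkos API):

#include <cstddef>
#include <cstdio>

struct Generic {};  // stands in for an arbitrary execution space
struct Gpu {};      // stands in for an execution space with a real prefetch

// Fallback: a no-op for every execution space...
template <class ExecSpace>
void prefetch(const ExecSpace&, const void*, std::size_t) {}

// ...except the one with a concrete implementation; a non-template
// overload is preferred over the template by overload resolution.
void prefetch(const Gpu&, const void*, std::size_t) {
  std::puts("prefetching");
}

int main() {
  int x = 0;
  prefetch(Generic{}, &x, sizeof x);  // resolves to the no-op template
  prefetch(Gpu{}, &x, sizeof x);      // resolves to the concrete overload
  return 0;
}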
diff --git a/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_HBWSpace.hpp b/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_HBWSpace.hpp
new file mode 100644 (file)
index 0000000..d9dada2
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HBWSPACE_FWD_HPP_
+#define KOKKOS_HBWSPACE_FWD_HPP_
+
+#ifdef KOKKOS_ENABLE_HBWSPACE
+namespace Kokkos {
+
+namespace Experimental {
+class HBWSpace;  ///< Memory space for hbw_malloc from memkind (e.g. for KNL
+                 ///< processor)
+}  // namespace Experimental
+}  // namespace Kokkos
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_HIP.hpp b/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_HIP.hpp
new file mode 100644 (file)
index 0000000..2754086
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HIP_FWD_HPP_
+#define KOKKOS_HIP_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_HIP)
+namespace Kokkos {
+namespace Experimental {
+class HIPSpace;            ///< Memory space on HIP GPU
+class HIPHostPinnedSpace;  ///< Memory space on Host accessible to HIP GPU
+class HIPManagedSpace;     ///< Memory migratable between Host and HIP GPU
+class HIP;                 ///< Execution space for HIP GPU
+}  // namespace Experimental
+}  // namespace Kokkos
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_HPX.hpp b/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_HPX.hpp
new file mode 100644 (file)
index 0000000..8949c52
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HPX_FWD_HPP_
+#define KOKKOS_HPX_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_HPX)
+namespace Kokkos {
+namespace Experimental {
+class HPX;  ///< Execution space with HPX back-end.
+}  // namespace Experimental
+}  // namespace Kokkos
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_OPENACC.hpp b/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_OPENACC.hpp
new file mode 100644 (file)
index 0000000..d733f99
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENACC_FWD_HPP_
+#define KOKKOS_OPENACC_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_OPENACC)
+namespace Kokkos {
+namespace Experimental {
+class OpenACC;  ///< OpenACC execution space.
+class OpenACCSpace;  ///< Memory space for the OpenACC back-end.
+}  // namespace Experimental
+}  // namespace Kokkos
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_OPENMP.hpp b/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_OPENMP.hpp
new file mode 100644 (file)
index 0000000..fc2223d
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMP_FWD_HPP_
+#define KOKKOS_OPENMP_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_OPENMP)
+namespace Kokkos {
+class OpenMP;  ///< OpenMP execution space.
+}  // namespace Kokkos
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_OPENMPTARGET.hpp b/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_OPENMPTARGET.hpp
new file mode 100644 (file)
index 0000000..8d12b8b
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_OPENMPTARGET_FWD_HPP_
+#define KOKKOS_OPENMPTARGET_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_OPENMPTARGET)
+namespace Kokkos {
+namespace Experimental {
+class OpenMPTarget;  ///< OpenMPTarget execution space.
+class OpenMPTargetSpace;  ///< Memory space for the OpenMPTarget back-end.
+}  // namespace Experimental
+}  // namespace Kokkos
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_SERIAL.hpp b/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_SERIAL.hpp
new file mode 100644 (file)
index 0000000..8f253d0
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SERIAL_FWD_HPP_
+#define KOKKOS_SERIAL_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_SERIAL)
+namespace Kokkos {
+class Serial;  ///< Execution space executing in the main process on the CPU.
+}  // namespace Kokkos
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_SYCL.hpp b/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_SYCL.hpp
new file mode 100644 (file)
index 0000000..0ce680c
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SYCL_FWD_HPP_
+#define KOKKOS_SYCL_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_SYCL)
+namespace Kokkos {
+namespace Experimental {
+class SYCLDeviceUSMSpace;  ///< Memory space on SYCL device, not accessible from
+                           ///< the host
+class SYCLSharedUSMSpace;  ///< Memory space accessible from both the SYCL
+                           ///< device and the host
+class SYCLHostUSMSpace;    ///< Memory space accessible from both the SYCL
+                           ///< device and the host (host pinned)
+class SYCL;                ///< Execution space for SYCL
+}  // namespace Experimental
+}  // namespace Kokkos
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_THREADS.hpp b/bundled/kokkos-3.7.00/core/src/fwd/Kokkos_Fwd_THREADS.hpp
new file mode 100644 (file)
index 0000000..af53777
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_THREADS_FWD_HPP_
+#define KOKKOS_THREADS_FWD_HPP_
+
+#if defined(KOKKOS_ENABLE_THREADS)
+namespace Kokkos {
+class Threads;  ///< Execution space with C++11 threads back-end.
+}  // namespace Kokkos
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/KokkosExp_Host_IterateTile.hpp b/bundled/kokkos-3.7.00/core/src/impl/KokkosExp_Host_IterateTile.hpp
new file mode 100644 (file)
index 0000000..2b2120c
--- /dev/null
@@ -0,0 +1,2766 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HOST_EXP_ITERATE_TILE_HPP
+#define KOKKOS_HOST_EXP_ITERATE_TILE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_AGGRESSIVE_VECTORIZATION) && \
+    defined(KOKKOS_ENABLE_PRAGMA_IVDEP) && !defined(__CUDA_ARCH__)
+#define KOKKOS_MDRANGE_IVDEP
+#endif
+
+#ifdef KOKKOS_MDRANGE_IVDEP
+#define KOKKOS_ENABLE_IVDEP_MDRANGE _Pragma("ivdep")
+#else
+#define KOKKOS_ENABLE_IVDEP_MDRANGE
+#endif
+
+#include <algorithm>
+
+namespace Kokkos {
+namespace Impl {
+
+// Temporary, for testing new loop macros
+#define KOKKOS_ENABLE_NEW_LOOP_MACROS 1
+
+#define LOOP_1L(type, tile)   \
+  KOKKOS_ENABLE_IVDEP_MDRANGE \
+  for (type i0 = 0; i0 < static_cast<type>(tile[0]); ++i0)
+
+#define LOOP_2L(type, tile) \
+  for (type i1 = 0; i1 < static_cast<type>(tile[1]); ++i1) LOOP_1L(type, tile)
+
+#define LOOP_3L(type, tile) \
+  for (type i2 = 0; i2 < static_cast<type>(tile[2]); ++i2) LOOP_2L(type, tile)
+
+#define LOOP_4L(type, tile) \
+  for (type i3 = 0; i3 < static_cast<type>(tile[3]); ++i3) LOOP_3L(type, tile)
+
+#define LOOP_5L(type, tile) \
+  for (type i4 = 0; i4 < static_cast<type>(tile[4]); ++i4) LOOP_4L(type, tile)
+
+#define LOOP_6L(type, tile) \
+  for (type i5 = 0; i5 < static_cast<type>(tile[5]); ++i5) LOOP_5L(type, tile)
+
+#define LOOP_7L(type, tile) \
+  for (type i6 = 0; i6 < static_cast<type>(tile[6]); ++i6) LOOP_6L(type, tile)
+
+#define LOOP_8L(type, tile) \
+  for (type i7 = 0; i7 < static_cast<type>(tile[7]); ++i7) LOOP_7L(type, tile)
+
+#define LOOP_1R(type, tile)   \
+  KOKKOS_ENABLE_IVDEP_MDRANGE \
+  for (type i0 = 0; i0 < static_cast<type>(tile[0]); ++i0)
+
+#define LOOP_2R(type, tile) \
+  LOOP_1R(type, tile)       \
+  for (type i1 = 0; i1 < static_cast<type>(tile[1]); ++i1)
+
+#define LOOP_3R(type, tile) \
+  LOOP_2R(type, tile)       \
+  for (type i2 = 0; i2 < static_cast<type>(tile[2]); ++i2)
+
+#define LOOP_4R(type, tile) \
+  LOOP_3R(type, tile)       \
+  for (type i3 = 0; i3 < static_cast<type>(tile[3]); ++i3)
+
+#define LOOP_5R(type, tile) \
+  LOOP_4R(type, tile)       \
+  for (type i4 = 0; i4 < static_cast<type>(tile[4]); ++i4)
+
+#define LOOP_6R(type, tile) \
+  LOOP_5R(type, tile)       \
+  for (type i5 = 0; i5 < static_cast<type>(tile[5]); ++i5)
+
+#define LOOP_7R(type, tile) \
+  LOOP_6R(type, tile)       \
+  for (type i6 = 0; i6 < static_cast<type>(tile[6]); ++i6)
+
+#define LOOP_8R(type, tile) \
+  LOOP_7R(type, tile)       \
+  for (type i7 = 0; i7 < static_cast<type>(tile[7]); ++i7)
+
+#define LOOP_ARGS_1 i0 + m_offset[0]
+#define LOOP_ARGS_2 LOOP_ARGS_1, i1 + m_offset[1]
+#define LOOP_ARGS_3 LOOP_ARGS_2, i2 + m_offset[2]
+#define LOOP_ARGS_4 LOOP_ARGS_3, i3 + m_offset[3]
+#define LOOP_ARGS_5 LOOP_ARGS_4, i4 + m_offset[4]
+#define LOOP_ARGS_6 LOOP_ARGS_5, i5 + m_offset[5]
+#define LOOP_ARGS_7 LOOP_ARGS_6, i6 + m_offset[6]
+#define LOOP_ARGS_8 LOOP_ARGS_7, i7 + m_offset[7]
+
+// New Loop Macros...
+// parallel_for, non-tagged
+#define APPLY(func, ...) func(__VA_ARGS__);
+
+// LayoutRight
+// d = 0 to start
+#define LOOP_R_1(func, type, m_offset, extent, d, ...)               \
+  KOKKOS_ENABLE_IVDEP_MDRANGE                                        \
+  for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
+    APPLY(func, __VA_ARGS__, i0 + m_offset[d])                       \
+  }
+
+#define LOOP_R_2(func, type, m_offset, extent, d, ...)               \
+  for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
+    LOOP_R_1(func, type, m_offset, extent, d + 1, __VA_ARGS__,       \
+             i1 + m_offset[d])                                       \
+  }
+
+#define LOOP_R_3(func, type, m_offset, extent, d, ...)               \
+  for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
+    LOOP_R_2(func, type, m_offset, extent, d + 1, __VA_ARGS__,       \
+             i2 + m_offset[d])                                       \
+  }
+
+#define LOOP_R_4(func, type, m_offset, extent, d, ...)               \
+  for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
+    LOOP_R_3(func, type, m_offset, extent, d + 1, __VA_ARGS__,       \
+             i3 + m_offset[d])                                       \
+  }
+
+#define LOOP_R_5(func, type, m_offset, extent, d, ...)               \
+  for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
+    LOOP_R_4(func, type, m_offset, extent, d + 1, __VA_ARGS__,       \
+             i4 + m_offset[d])                                       \
+  }
+
+#define LOOP_R_6(func, type, m_offset, extent, d, ...)               \
+  for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
+    LOOP_R_5(func, type, m_offset, extent, d + 1, __VA_ARGS__,       \
+             i5 + m_offset[d])                                       \
+  }
+
+#define LOOP_R_7(func, type, m_offset, extent, d, ...)               \
+  for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
+    LOOP_R_6(func, type, m_offset, extent, d + 1, __VA_ARGS__,       \
+             i6 + m_offset[d])                                       \
+  }
+
+#define LOOP_R_8(func, type, m_offset, extent, d, ...)               \
+  for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
+    LOOP_R_7(func, type, m_offset, extent, d + 1, __VA_ARGS__,       \
+             i7 + m_offset[d])                                       \
+  }
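+// Editorial sketch of the recursion above: each LOOP_R_n level opens
+// one loop and appends its offset index to the variadic argument pack,
+// so for rank 2 (outer loop from LOOP_LAYOUT_2 below, inner from
+// LOOP_R_1 entered with d == 1) the net expansion is
+//
+//   for (type i1 = 0; i1 < extent[0]; ++i1)
+//     for (type i0 = 0; i0 < extent[1]; ++i0)
+//       func(i1 + m_offset[0], i0 + m_offset[1]);
+//
+// keeping the fastest-varying (rightmost) dimension in the inner loop.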
+
+// LayoutLeft
+// d = rank-1 to start
+#define LOOP_L_1(func, type, m_offset, extent, d, ...)               \
+  KOKKOS_ENABLE_IVDEP_MDRANGE                                        \
+  for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
+    APPLY(func, i0 + m_offset[d], __VA_ARGS__)                       \
+  }
+
+#define LOOP_L_2(func, type, m_offset, extent, d, ...)               \
+  for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
+    LOOP_L_1(func, type, m_offset, extent, d - 1, i1 + m_offset[d],  \
+             __VA_ARGS__)                                            \
+  }
+
+#define LOOP_L_3(func, type, m_offset, extent, d, ...)               \
+  for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
+    LOOP_L_2(func, type, m_offset, extent, d - 1, i2 + m_offset[d],  \
+             __VA_ARGS__)                                            \
+  }
+
+#define LOOP_L_4(func, type, m_offset, extent, d, ...)               \
+  for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
+    LOOP_L_3(func, type, m_offset, extent, d - 1, i3 + m_offset[d],  \
+             __VA_ARGS__)                                            \
+  }
+
+#define LOOP_L_5(func, type, m_offset, extent, d, ...)               \
+  for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
+    LOOP_L_4(func, type, m_offset, extent, d - 1, i4 + m_offset[d],  \
+             __VA_ARGS__)                                            \
+  }
+
+#define LOOP_L_6(func, type, m_offset, extent, d, ...)               \
+  for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
+    LOOP_L_5(func, type, m_offset, extent, d - 1, i5 + m_offset[d],  \
+             __VA_ARGS__)                                            \
+  }
+
+#define LOOP_L_7(func, type, m_offset, extent, d, ...)               \
+  for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
+    LOOP_L_6(func, type, m_offset, extent, d - 1, i6 + m_offset[d],  \
+             __VA_ARGS__)                                            \
+  }
+
+#define LOOP_L_8(func, type, m_offset, extent, d, ...)               \
+  for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
+    LOOP_L_7(func, type, m_offset, extent, d - 1, i7 + m_offset[d],  \
+             __VA_ARGS__)                                            \
+  }
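+// Editorial sketch: the LOOP_L_n levels prepend the new offset index
+// instead of appending it, so the functor still receives its indices
+// in dimension order (i0 first) while the loop nest runs dimension 0
+// innermost; the rank-2 counterpart of the sketch above is
+//
+//   for (type i1 = 0; i1 < extent[1]; ++i1)
+//     for (type i0 = 0; i0 < extent[0]; ++i0)
+//       func(i0 + m_offset[0], i1 + m_offset[1]);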
+
+// Left vs Right
+// TODO: rank does not need to be passed through; the values could be hardcoded
+#define LOOP_LAYOUT_1(func, type, is_left, m_offset, extent, rank)   \
+  KOKKOS_ENABLE_IVDEP_MDRANGE                                        \
+  for (type i0 = (type)0; i0 < static_cast<type>(extent[0]); ++i0) { \
+    APPLY(func, i0 + m_offset[0])                                    \
+  }
+
+#define LOOP_LAYOUT_2(func, type, is_left, m_offset, extent, rank)            \
+  if (is_left) {                                                              \
+    for (type i1 = (type)0; i1 < static_cast<type>(extent[rank - 1]); ++i1) { \
+      LOOP_L_1(func, type, m_offset, extent, rank - 2,                        \
+               i1 + m_offset[rank - 1])                                       \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i1 = (type)0; i1 < static_cast<type>(extent[0]); ++i1) {        \
+      LOOP_R_1(func, type, m_offset, extent, 1, i1 + m_offset[0])             \
+    }                                                                         \
+  }
+
+#define LOOP_LAYOUT_3(func, type, is_left, m_offset, extent, rank)            \
+  if (is_left) {                                                              \
+    for (type i2 = (type)0; i2 < static_cast<type>(extent[rank - 1]); ++i2) { \
+      LOOP_L_2(func, type, m_offset, extent, rank - 2,                        \
+               i2 + m_offset[rank - 1])                                       \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i2 = (type)0; i2 < static_cast<type>(extent[0]); ++i2) {        \
+      LOOP_R_2(func, type, m_offset, extent, 1, i2 + m_offset[0])             \
+    }                                                                         \
+  }
+
+#define LOOP_LAYOUT_4(func, type, is_left, m_offset, extent, rank)            \
+  if (is_left) {                                                              \
+    for (type i3 = (type)0; i3 < static_cast<type>(extent[rank - 1]); ++i3) { \
+      LOOP_L_3(func, type, m_offset, extent, rank - 2,                        \
+               i3 + m_offset[rank - 1])                                       \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i3 = (type)0; i3 < static_cast<type>(extent[0]); ++i3) {        \
+      LOOP_R_3(func, type, m_offset, extent, 1, i3 + m_offset[0])             \
+    }                                                                         \
+  }
+
+#define LOOP_LAYOUT_5(func, type, is_left, m_offset, extent, rank)            \
+  if (is_left) {                                                              \
+    for (type i4 = (type)0; i4 < static_cast<type>(extent[rank - 1]); ++i4) { \
+      LOOP_L_4(func, type, m_offset, extent, rank - 2,                        \
+               i4 + m_offset[rank - 1])                                       \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i4 = (type)0; i4 < static_cast<type>(extent[0]); ++i4) {        \
+      LOOP_R_4(func, type, m_offset, extent, 1, i4 + m_offset[0])             \
+    }                                                                         \
+  }
+
+#define LOOP_LAYOUT_6(func, type, is_left, m_offset, extent, rank)            \
+  if (is_left) {                                                              \
+    for (type i5 = (type)0; i5 < static_cast<type>(extent[rank - 1]); ++i5) { \
+      LOOP_L_5(func, type, m_offset, extent, rank - 2,                        \
+               i5 + m_offset[rank - 1])                                       \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i5 = (type)0; i5 < static_cast<type>(extent[0]); ++i5) {        \
+      LOOP_R_5(func, type, m_offset, extent, 1, i5 + m_offset[0])             \
+    }                                                                         \
+  }
+
+#define LOOP_LAYOUT_7(func, type, is_left, m_offset, extent, rank)            \
+  if (is_left) {                                                              \
+    for (type i6 = (type)0; i6 < static_cast<type>(extent[rank - 1]); ++i6) { \
+      LOOP_L_6(func, type, m_offset, extent, rank - 2,                        \
+               i6 + m_offset[rank - 1])                                       \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i6 = (type)0; i6 < static_cast<type>(extent[0]); ++i6) {        \
+      LOOP_R_6(func, type, m_offset, extent, 1, i6 + m_offset[0])             \
+    }                                                                         \
+  }
+
+#define LOOP_LAYOUT_8(func, type, is_left, m_offset, extent, rank)            \
+  if (is_left) {                                                              \
+    for (type i7 = (type)0; i7 < static_cast<type>(extent[rank - 1]); ++i7) { \
+      LOOP_L_7(func, type, m_offset, extent, rank - 2,                        \
+               i7 + m_offset[rank - 1])                                       \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i7 = (type)0; i7 < static_cast<type>(extent[0]); ++i7) {        \
+      LOOP_R_7(func, type, m_offset, extent, 1, i7 + m_offset[0])             \
+    }                                                                         \
+  }
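+// Editorial note: for rank 1 the two layouts coincide, which is why
+// LOOP_LAYOUT_1 above ignores is_left; for rank >= 2 each LOOP_LAYOUT_n
+// opens the outermost loop itself and delegates the remaining n - 1
+// loops to the LOOP_L_* chain (entered at d = rank - 2) or the
+// LOOP_R_* chain (entered at d = 1).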
+
+// Partial vs Full Tile
+#define TILE_LOOP_1(func, type, is_left, cond, m_offset, extent_full,  \
+                    extent_partial, rank)                              \
+  if (cond) {                                                          \
+    LOOP_LAYOUT_1(func, type, is_left, m_offset, extent_full, rank)    \
+  } else {                                                             \
+    LOOP_LAYOUT_1(func, type, is_left, m_offset, extent_partial, rank) \
+  }
+
+#define TILE_LOOP_2(func, type, is_left, cond, m_offset, extent_full,  \
+                    extent_partial, rank)                              \
+  if (cond) {                                                          \
+    LOOP_LAYOUT_2(func, type, is_left, m_offset, extent_full, rank)    \
+  } else {                                                             \
+    LOOP_LAYOUT_2(func, type, is_left, m_offset, extent_partial, rank) \
+  }
+
+#define TILE_LOOP_3(func, type, is_left, cond, m_offset, extent_full,  \
+                    extent_partial, rank)                              \
+  if (cond) {                                                          \
+    LOOP_LAYOUT_3(func, type, is_left, m_offset, extent_full, rank)    \
+  } else {                                                             \
+    LOOP_LAYOUT_3(func, type, is_left, m_offset, extent_partial, rank) \
+  }
+
+#define TILE_LOOP_4(func, type, is_left, cond, m_offset, extent_full,  \
+                    extent_partial, rank)                              \
+  if (cond) {                                                          \
+    LOOP_LAYOUT_4(func, type, is_left, m_offset, extent_full, rank)    \
+  } else {                                                             \
+    LOOP_LAYOUT_4(func, type, is_left, m_offset, extent_partial, rank) \
+  }
+
+#define TILE_LOOP_5(func, type, is_left, cond, m_offset, extent_full,  \
+                    extent_partial, rank)                              \
+  if (cond) {                                                          \
+    LOOP_LAYOUT_5(func, type, is_left, m_offset, extent_full, rank)    \
+  } else {                                                             \
+    LOOP_LAYOUT_5(func, type, is_left, m_offset, extent_partial, rank) \
+  }
+
+#define TILE_LOOP_6(func, type, is_left, cond, m_offset, extent_full,  \
+                    extent_partial, rank)                              \
+  if (cond) {                                                          \
+    LOOP_LAYOUT_6(func, type, is_left, m_offset, extent_full, rank)    \
+  } else {                                                             \
+    LOOP_LAYOUT_6(func, type, is_left, m_offset, extent_partial, rank) \
+  }
+
+#define TILE_LOOP_7(func, type, is_left, cond, m_offset, extent_full,  \
+                    extent_partial, rank)                              \
+  if (cond) {                                                          \
+    LOOP_LAYOUT_7(func, type, is_left, m_offset, extent_full, rank)    \
+  } else {                                                             \
+    LOOP_LAYOUT_7(func, type, is_left, m_offset, extent_partial, rank) \
+  }
+
+#define TILE_LOOP_8(func, type, is_left, cond, m_offset, extent_full,  \
+                    extent_partial, rank)                              \
+  if (cond) {                                                          \
+    LOOP_LAYOUT_8(func, type, is_left, m_offset, extent_full, rank)    \
+  } else {                                                             \
+    LOOP_LAYOUT_8(func, type, is_left, m_offset, extent_partial, rank) \
+  }
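+// Illustrative call site (editorial sketch; m_func, index_type,
+// is_left, full, m_tiledims and partial_tiledims are hypothetical
+// names): a host backend walking the tiles of a rank-2 MDRange would
+// pick the full- or partial-extent nest per tile, e.g.
+//
+//   const bool full = /* tile lies entirely inside the iteration space */;
+//   TILE_LOOP_2(m_func, index_type, is_left, full, m_offset,
+//               m_tiledims, partial_tiledims, 2)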
+
+// parallel_reduce, non-tagged
+// Reduction version
+#define APPLY_REDUX(val, func, ...) func(__VA_ARGS__, val);
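+// Editorial sketch: APPLY_REDUX passes the reduction variable last, so
+// it matches a rank-2 functor of the (assumed) form
+//
+//   void operator()(index_type i0, index_type i1, value_type& val) const;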
+
+// LayoutRight
+// d = 0 to start
+#define LOOP_R_1_REDUX(val, func, type, m_offset, extent, d, ...)    \
+  KOKKOS_ENABLE_IVDEP_MDRANGE                                        \
+  for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
+    APPLY_REDUX(val, func, __VA_ARGS__, i0 + m_offset[d])            \
+  }
+
+#define LOOP_R_2_REDUX(val, func, type, m_offset, extent, d, ...)         \
+  for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) {      \
+    LOOP_R_1_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                   i1 + m_offset[d])                                      \
+  }
+
+#define LOOP_R_3_REDUX(val, func, type, m_offset, extent, d, ...)         \
+  for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) {      \
+    LOOP_R_2_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                   i2 + m_offset[d])                                      \
+  }
+
+#define LOOP_R_4_REDUX(val, func, type, m_offset, extent, d, ...)         \
+  for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) {      \
+    LOOP_R_3_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                   i3 + m_offset[d])                                      \
+  }
+
+#define LOOP_R_5_REDUX(val, func, type, m_offset, extent, d, ...)         \
+  for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) {      \
+    LOOP_R_4_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                   i4 + m_offset[d])                                      \
+  }
+
+#define LOOP_R_6_REDUX(val, func, type, m_offset, extent, d, ...)         \
+  for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) {      \
+    LOOP_R_5_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                   i5 + m_offset[d])                                      \
+  }
+
+#define LOOP_R_7_REDUX(val, func, type, m_offset, extent, d, ...)         \
+  for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) {      \
+    LOOP_R_6_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                   i6 + m_offset[d])                                      \
+  }
+
+#define LOOP_R_8_REDUX(val, func, type, m_offset, extent, d, ...)         \
+  for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) {      \
+    LOOP_R_7_REDUX(val, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                   i7 + m_offset[d])                                      \
+  }
+
+// LayoutLeft
+// d = rank-1 to start
+#define LOOP_L_1_REDUX(val, func, type, m_offset, extent, d, ...)    \
+  KOKKOS_ENABLE_IVDEP_MDRANGE                                        \
+  for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
+    APPLY_REDUX(val, func, i0 + m_offset[d], __VA_ARGS__)            \
+  }
+
+#define LOOP_L_2_REDUX(val, func, type, m_offset, extent, d, ...)              \
+  for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) {           \
+    LOOP_L_1_REDUX(val, func, type, m_offset, extent, d - 1, i1 + m_offset[d], \
+                   __VA_ARGS__)                                                \
+  }
+
+#define LOOP_L_3_REDUX(val, func, type, m_offset, extent, d, ...)              \
+  for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) {           \
+    LOOP_L_2_REDUX(val, func, type, m_offset, extent, d - 1, i2 + m_offset[d], \
+                   __VA_ARGS__)                                                \
+  }
+
+#define LOOP_L_4_REDUX(val, func, type, m_offset, extent, d, ...)              \
+  for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) {           \
+    LOOP_L_3_REDUX(val, func, type, m_offset, extent, d - 1, i3 + m_offset[d], \
+                   __VA_ARGS__)                                                \
+  }
+
+#define LOOP_L_5_REDUX(val, func, type, m_offset, extent, d, ...)              \
+  for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) {           \
+    LOOP_L_4_REDUX(val, func, type, m_offset, extent, d - 1, i4 + m_offset[d], \
+                   __VA_ARGS__)                                                \
+  }
+
+#define LOOP_L_6_REDUX(val, func, type, m_offset, extent, d, ...)              \
+  for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) {           \
+    LOOP_L_5_REDUX(val, func, type, m_offset, extent, d - 1, i5 + m_offset[d], \
+                   __VA_ARGS__)                                                \
+  }
+
+#define LOOP_L_7_REDUX(val, func, type, m_offset, extent, d, ...)              \
+  for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) {           \
+    LOOP_L_6_REDUX(val, func, type, m_offset, extent, d - 1, i6 + m_offset[d], \
+                   __VA_ARGS__)                                                \
+  }
+
+#define LOOP_L_8_REDUX(val, func, type, m_offset, extent, d, ...)              \
+  for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) {           \
+    LOOP_L_7_REDUX(val, func, type, m_offset, extent, d - 1, i7 + m_offset[d], \
+                   __VA_ARGS__)                                                \
+  }
+
+// Left vs Right
+#define LOOP_LAYOUT_1_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+  KOKKOS_ENABLE_IVDEP_MDRANGE                                                 \
+  for (type i0 = (type)0; i0 < static_cast<type>(extent[0]); ++i0) {          \
+    APPLY_REDUX(val, func, i0 + m_offset[0])                                  \
+  }
+
+#define LOOP_LAYOUT_2_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                              \
+    for (type i1 = (type)0; i1 < static_cast<type>(extent[rank - 1]); ++i1) { \
+      LOOP_L_1_REDUX(val, func, type, m_offset, extent, rank - 2,             \
+                     i1 + m_offset[rank - 1])                                 \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i1 = (type)0; i1 < static_cast<type>(extent[0]); ++i1) {        \
+      LOOP_R_1_REDUX(val, func, type, m_offset, extent, 1, i1 + m_offset[0])  \
+    }                                                                         \
+  }
+
+#define LOOP_LAYOUT_3_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                              \
+    for (type i2 = (type)0; i2 < static_cast<type>(extent[rank - 1]); ++i2) { \
+      LOOP_L_2_REDUX(val, func, type, m_offset, extent, rank - 2,             \
+                     i2 + m_offset[rank - 1])                                 \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i2 = (type)0; i2 < static_cast<type>(extent[0]); ++i2) {        \
+      LOOP_R_2_REDUX(val, func, type, m_offset, extent, 1, i2 + m_offset[0])  \
+    }                                                                         \
+  }
+
+#define LOOP_LAYOUT_4_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                              \
+    for (type i3 = (type)0; i3 < static_cast<type>(extent[rank - 1]); ++i3) { \
+      LOOP_L_3_REDUX(val, func, type, m_offset, extent, rank - 2,             \
+                     i3 + m_offset[rank - 1])                                 \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i3 = (type)0; i3 < static_cast<type>(extent[0]); ++i3) {        \
+      LOOP_R_3_REDUX(val, func, type, m_offset, extent, 1, i3 + m_offset[0])  \
+    }                                                                         \
+  }
+
+#define LOOP_LAYOUT_5_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                              \
+    for (type i4 = (type)0; i4 < static_cast<type>(extent[rank - 1]); ++i4) { \
+      LOOP_L_4_REDUX(val, func, type, m_offset, extent, rank - 2,             \
+                     i4 + m_offset[rank - 1])                                 \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i4 = (type)0; i4 < static_cast<type>(extent[0]); ++i4) {        \
+      LOOP_R_4_REDUX(val, func, type, m_offset, extent, 1, i4 + m_offset[0])  \
+    }                                                                         \
+  }
+
+#define LOOP_LAYOUT_6_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                              \
+    for (type i5 = (type)0; i5 < static_cast<type>(extent[rank - 1]); ++i5) { \
+      LOOP_L_5_REDUX(val, func, type, m_offset, extent, rank - 2,             \
+                     i5 + m_offset[rank - 1])                                 \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i5 = (type)0; i5 < static_cast<type>(extent[0]); ++i5) {        \
+      LOOP_R_5_REDUX(val, func, type, m_offset, extent, 1, i5 + m_offset[0])  \
+    }                                                                         \
+  }
+
+#define LOOP_LAYOUT_7_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                              \
+    for (type i6 = (type)0; i6 < static_cast<type>(extent[rank - 1]); ++i6) { \
+      LOOP_L_6_REDUX(val, func, type, m_offset, extent, rank - 2,             \
+                     i6 + m_offset[rank - 1])                                 \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i6 = (type)0; i6 < static_cast<type>(extent[0]); ++i6) {        \
+      LOOP_R_6_REDUX(val, func, type, m_offset, extent, 1, i6 + m_offset[0])  \
+    }                                                                         \
+  }
+
+#define LOOP_LAYOUT_8_REDUX(val, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                              \
+    for (type i7 = (type)0; i7 < static_cast<type>(extent[rank - 1]); ++i7) { \
+      LOOP_L_7_REDUX(val, func, type, m_offset, extent, rank - 2,             \
+                     i7 + m_offset[rank - 1])                                 \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i7 = (type)0; i7 < static_cast<type>(extent[0]); ++i7) {        \
+      LOOP_R_7_REDUX(val, func, type, m_offset, extent, 1, i7 + m_offset[0])  \
+    }                                                                         \
+  }
+
+// Partial vs Full Tile
+#define TILE_LOOP_1_REDUX(val, func, type, is_left, cond, m_offset,            \
+                          extent_full, extent_partial, rank)                   \
+  if (cond) {                                                                  \
+    LOOP_LAYOUT_1_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
+  } else {                                                                     \
+    LOOP_LAYOUT_1_REDUX(val, func, type, is_left, m_offset, extent_partial,    \
+                        rank)                                                  \
+  }
+
+#define TILE_LOOP_2_REDUX(val, func, type, is_left, cond, m_offset,            \
+                          extent_full, extent_partial, rank)                   \
+  if (cond) {                                                                  \
+    LOOP_LAYOUT_2_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
+  } else {                                                                     \
+    LOOP_LAYOUT_2_REDUX(val, func, type, is_left, m_offset, extent_partial,    \
+                        rank)                                                  \
+  }
+
+#define TILE_LOOP_3_REDUX(val, func, type, is_left, cond, m_offset,            \
+                          extent_full, extent_partial, rank)                   \
+  if (cond) {                                                                  \
+    LOOP_LAYOUT_3_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
+  } else {                                                                     \
+    LOOP_LAYOUT_3_REDUX(val, func, type, is_left, m_offset, extent_partial,    \
+                        rank)                                                  \
+  }
+
+#define TILE_LOOP_4_REDUX(val, func, type, is_left, cond, m_offset,            \
+                          extent_full, extent_partial, rank)                   \
+  if (cond) {                                                                  \
+    LOOP_LAYOUT_4_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
+  } else {                                                                     \
+    LOOP_LAYOUT_4_REDUX(val, func, type, is_left, m_offset, extent_partial,    \
+                        rank)                                                  \
+  }
+
+#define TILE_LOOP_5_REDUX(val, func, type, is_left, cond, m_offset,            \
+                          extent_full, extent_partial, rank)                   \
+  if (cond) {                                                                  \
+    LOOP_LAYOUT_5_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
+  } else {                                                                     \
+    LOOP_LAYOUT_5_REDUX(val, func, type, is_left, m_offset, extent_partial,    \
+                        rank)                                                  \
+  }
+
+#define TILE_LOOP_6_REDUX(val, func, type, is_left, cond, m_offset,            \
+                          extent_full, extent_partial, rank)                   \
+  if (cond) {                                                                  \
+    LOOP_LAYOUT_6_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
+  } else {                                                                     \
+    LOOP_LAYOUT_6_REDUX(val, func, type, is_left, m_offset, extent_partial,    \
+                        rank)                                                  \
+  }
+
+#define TILE_LOOP_7_REDUX(val, func, type, is_left, cond, m_offset,            \
+                          extent_full, extent_partial, rank)                   \
+  if (cond) {                                                                  \
+    LOOP_LAYOUT_7_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
+  } else {                                                                     \
+    LOOP_LAYOUT_7_REDUX(val, func, type, is_left, m_offset, extent_partial,    \
+                        rank)                                                  \
+  }
+
+#define TILE_LOOP_8_REDUX(val, func, type, is_left, cond, m_offset,            \
+                          extent_full, extent_partial, rank)                   \
+  if (cond) {                                                                  \
+    LOOP_LAYOUT_8_REDUX(val, func, type, is_left, m_offset, extent_full, rank) \
+  } else {                                                                     \
+    LOOP_LAYOUT_8_REDUX(val, func, type, is_left, m_offset, extent_partial,    \
+                        rank)                                                  \
+  }
+// end New Loop Macros
+
+// parallel_for, tagged macros
+#define TAGGED_APPLY(tag, func, ...) func(tag, __VA_ARGS__);
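+// Editorial sketch: TAGGED_APPLY forwards the work tag first, matching
+// a rank-2 tagged functor of the (assumed) form
+//
+//   void operator()(const WorkTag&, index_type i0, index_type i1) const;
+//
+// where WorkTag stands for the hypothetical tag type of the policy.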
+
+// LayoutRight
+// d = 0 to start
+#define TAGGED_LOOP_R_1(tag, func, type, m_offset, extent, d, ...)   \
+  KOKKOS_ENABLE_IVDEP_MDRANGE                                        \
+  for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
+    TAGGED_APPLY(tag, func, __VA_ARGS__, i0 + m_offset[d])           \
+  }
+
+#define TAGGED_LOOP_R_2(tag, func, type, m_offset, extent, d, ...)         \
+  for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) {       \
+    TAGGED_LOOP_R_1(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                    i1 + m_offset[d])                                      \
+  }
+
+#define TAGGED_LOOP_R_3(tag, func, type, m_offset, extent, d, ...)         \
+  for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) {       \
+    TAGGED_LOOP_R_2(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                    i2 + m_offset[d])                                      \
+  }
+
+#define TAGGED_LOOP_R_4(tag, func, type, m_offset, extent, d, ...)         \
+  for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) {       \
+    TAGGED_LOOP_R_3(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                    i3 + m_offset[d])                                      \
+  }
+
+#define TAGGED_LOOP_R_5(tag, func, type, m_offset, extent, d, ...)         \
+  for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) {       \
+    TAGGED_LOOP_R_4(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                    i4 + m_offset[d])                                      \
+  }
+
+#define TAGGED_LOOP_R_6(tag, func, type, m_offset, extent, d, ...)         \
+  for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) {       \
+    TAGGED_LOOP_R_5(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                    i5 + m_offset[d])                                      \
+  }
+
+#define TAGGED_LOOP_R_7(tag, func, type, m_offset, extent, d, ...)         \
+  for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) {       \
+    TAGGED_LOOP_R_6(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                    i6 + m_offset[d])                                      \
+  }
+
+#define TAGGED_LOOP_R_8(tag, func, type, m_offset, extent, d, ...)         \
+  for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) {       \
+    TAGGED_LOOP_R_7(tag, func, type, m_offset, extent, d + 1, __VA_ARGS__, \
+                    i7 + m_offset[d])                                      \
+  }
+
+// LayoutLeft
+// d = rank-1 to start
+#define TAGGED_LOOP_L_1(tag, func, type, m_offset, extent, d, ...)   \
+  KOKKOS_ENABLE_IVDEP_MDRANGE                                        \
+  for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) { \
+    TAGGED_APPLY(tag, func, i0 + m_offset[d], __VA_ARGS__)           \
+  }
+
+#define TAGGED_LOOP_L_2(tag, func, type, m_offset, extent, d, ...)   \
+  for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) { \
+    TAGGED_LOOP_L_1(tag, func, type, m_offset, extent, d - 1,        \
+                    i1 + m_offset[d], __VA_ARGS__)                   \
+  }
+
+#define TAGGED_LOOP_L_3(tag, func, type, m_offset, extent, d, ...)   \
+  for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) { \
+    TAGGED_LOOP_L_2(tag, func, type, m_offset, extent, d - 1,        \
+                    i2 + m_offset[d], __VA_ARGS__)                   \
+  }
+
+#define TAGGED_LOOP_L_4(tag, func, type, m_offset, extent, d, ...)   \
+  for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) { \
+    TAGGED_LOOP_L_3(tag, func, type, m_offset, extent, d - 1,        \
+                    i3 + m_offset[d], __VA_ARGS__)                   \
+  }
+
+#define TAGGED_LOOP_L_5(tag, func, type, m_offset, extent, d, ...)   \
+  for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) { \
+    TAGGED_LOOP_L_4(tag, func, type, m_offset, extent, d - 1,        \
+                    i4 + m_offset[d], __VA_ARGS__)                   \
+  }
+
+#define TAGGED_LOOP_L_6(tag, func, type, m_offset, extent, d, ...)   \
+  for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) { \
+    TAGGED_LOOP_L_5(tag, func, type, m_offset, extent, d - 1,        \
+                    i5 + m_offset[d], __VA_ARGS__)                   \
+  }
+
+#define TAGGED_LOOP_L_7(tag, func, type, m_offset, extent, d, ...)   \
+  for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) { \
+    TAGGED_LOOP_L_6(tag, func, type, m_offset, extent, d - 1,        \
+                    i6 + m_offset[d], __VA_ARGS__)                   \
+  }
+
+#define TAGGED_LOOP_L_8(tag, func, type, m_offset, extent, d, ...)   \
+  for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) { \
+    TAGGED_LOOP_L_7(tag, func, type, m_offset, extent, d - 1,        \
+                    i7 + m_offset[d], __VA_ARGS__)                   \
+  }
+
+// Left vs Right
+// TODO: rank does not need to be passed through; the values could be hardcoded
+#define TAGGED_LOOP_LAYOUT_1(tag, func, type, is_left, m_offset, extent, rank) \
+  KOKKOS_ENABLE_IVDEP_MDRANGE                                                  \
+  for (type i0 = (type)0; i0 < static_cast<type>(extent[0]); ++i0) {           \
+    TAGGED_APPLY(tag, func, i0 + m_offset[0])                                  \
+  }
+
+#define TAGGED_LOOP_LAYOUT_2(tag, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                               \
+    for (type i1 = (type)0; i1 < static_cast<type>(extent[rank - 1]); ++i1) {  \
+      TAGGED_LOOP_L_1(tag, func, type, m_offset, extent, rank - 2,             \
+                      i1 + m_offset[rank - 1])                                 \
+    }                                                                          \
+  } else {                                                                     \
+    for (type i1 = (type)0; i1 < static_cast<type>(extent[0]); ++i1) {         \
+      TAGGED_LOOP_R_1(tag, func, type, m_offset, extent, 1, i1 + m_offset[0])  \
+    }                                                                          \
+  }
+
+#define TAGGED_LOOP_LAYOUT_3(tag, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                               \
+    for (type i2 = (type)0; i2 < static_cast<type>(extent[rank - 1]); ++i2) {  \
+      TAGGED_LOOP_L_2(tag, func, type, m_offset, extent, rank - 2,             \
+                      i2 + m_offset[rank - 1])                                 \
+    }                                                                          \
+  } else {                                                                     \
+    for (type i2 = (type)0; i2 < static_cast<type>(extent[0]); ++i2) {         \
+      TAGGED_LOOP_R_2(tag, func, type, m_offset, extent, 1, i2 + m_offset[0])  \
+    }                                                                          \
+  }
+
+#define TAGGED_LOOP_LAYOUT_4(tag, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                               \
+    for (type i3 = (type)0; i3 < static_cast<type>(extent[rank - 1]); ++i3) {  \
+      TAGGED_LOOP_L_3(tag, func, type, m_offset, extent, rank - 2,             \
+                      i3 + m_offset[rank - 1])                                 \
+    }                                                                          \
+  } else {                                                                     \
+    for (type i3 = (type)0; i3 < static_cast<type>(extent[0]); ++i3) {         \
+      TAGGED_LOOP_R_3(tag, func, type, m_offset, extent, 1, i3 + m_offset[0])  \
+    }                                                                          \
+  }
+
+#define TAGGED_LOOP_LAYOUT_5(tag, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                               \
+    for (type i4 = (type)0; i4 < static_cast<type>(extent[rank - 1]); ++i4) {  \
+      TAGGED_LOOP_L_4(tag, func, type, m_offset, extent, rank - 2,             \
+                      i4 + m_offset[rank - 1])                                 \
+    }                                                                          \
+  } else {                                                                     \
+    for (type i4 = (type)0; i4 < static_cast<type>(extent[0]); ++i4) {         \
+      TAGGED_LOOP_R_4(tag, func, type, m_offset, extent, 1, i4 + m_offset[0])  \
+    }                                                                          \
+  }
+
+#define TAGGED_LOOP_LAYOUT_6(tag, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                               \
+    for (type i5 = (type)0; i5 < static_cast<type>(extent[rank - 1]); ++i5) {  \
+      TAGGED_LOOP_L_5(tag, func, type, m_offset, extent, rank - 2,             \
+                      i5 + m_offset[rank - 1])                                 \
+    }                                                                          \
+  } else {                                                                     \
+    for (type i5 = (type)0; i5 < static_cast<type>(extent[0]); ++i5) {         \
+      TAGGED_LOOP_R_5(tag, func, type, m_offset, extent, 1, i5 + m_offset[0])  \
+    }                                                                          \
+  }
+
+#define TAGGED_LOOP_LAYOUT_7(tag, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                               \
+    for (type i6 = (type)0; i6 < static_cast<type>(extent[rank - 1]); ++i6) {  \
+      TAGGED_LOOP_L_6(tag, func, type, m_offset, extent, rank - 2,             \
+                      i6 + m_offset[rank - 1])                                 \
+    }                                                                          \
+  } else {                                                                     \
+    for (type i6 = (type)0; i6 < static_cast<type>(extent[0]); ++i6) {         \
+      TAGGED_LOOP_R_6(tag, func, type, m_offset, extent, 1, i6 + m_offset[0])  \
+    }                                                                          \
+  }
+
+#define TAGGED_LOOP_LAYOUT_8(tag, func, type, is_left, m_offset, extent, rank) \
+  if (is_left) {                                                               \
+    for (type i7 = (type)0; i7 < static_cast<type>(extent[rank - 1]); ++i7) {  \
+      TAGGED_LOOP_L_7(tag, func, type, m_offset, extent, rank - 2,             \
+                      i7 + m_offset[rank - 1])                                 \
+    }                                                                          \
+  } else {                                                                     \
+    for (type i7 = (type)0; i7 < static_cast<type>(extent[0]); ++i7) {         \
+      TAGGED_LOOP_R_7(tag, func, type, m_offset, extent, 1, i7 + m_offset[0])  \
+    }                                                                          \
+  }
+
+// Partial vs Full Tile
+#define TAGGED_TILE_LOOP_1(tag, func, type, is_left, cond, m_offset,         \
+                           extent_full, extent_partial, rank)                \
+  if (cond) {                                                                \
+    TAGGED_LOOP_LAYOUT_1(tag, func, type, is_left, m_offset, extent_full,    \
+                         rank)                                               \
+  } else {                                                                   \
+    TAGGED_LOOP_LAYOUT_1(tag, func, type, is_left, m_offset, extent_partial, \
+                         rank)                                               \
+  }
+
+#define TAGGED_TILE_LOOP_2(tag, func, type, is_left, cond, m_offset,         \
+                           extent_full, extent_partial, rank)                \
+  if (cond) {                                                                \
+    TAGGED_LOOP_LAYOUT_2(tag, func, type, is_left, m_offset, extent_full,    \
+                         rank)                                               \
+  } else {                                                                   \
+    TAGGED_LOOP_LAYOUT_2(tag, func, type, is_left, m_offset, extent_partial, \
+                         rank)                                               \
+  }
+
+#define TAGGED_TILE_LOOP_3(tag, func, type, is_left, cond, m_offset,         \
+                           extent_full, extent_partial, rank)                \
+  if (cond) {                                                                \
+    TAGGED_LOOP_LAYOUT_3(tag, func, type, is_left, m_offset, extent_full,    \
+                         rank)                                               \
+  } else {                                                                   \
+    TAGGED_LOOP_LAYOUT_3(tag, func, type, is_left, m_offset, extent_partial, \
+                         rank)                                               \
+  }
+
+#define TAGGED_TILE_LOOP_4(tag, func, type, is_left, cond, m_offset,         \
+                           extent_full, extent_partial, rank)                \
+  if (cond) {                                                                \
+    TAGGED_LOOP_LAYOUT_4(tag, func, type, is_left, m_offset, extent_full,    \
+                         rank)                                               \
+  } else {                                                                   \
+    TAGGED_LOOP_LAYOUT_4(tag, func, type, is_left, m_offset, extent_partial, \
+                         rank)                                               \
+  }
+
+#define TAGGED_TILE_LOOP_5(tag, func, type, is_left, cond, m_offset,         \
+                           extent_full, extent_partial, rank)                \
+  if (cond) {                                                                \
+    TAGGED_LOOP_LAYOUT_5(tag, func, type, is_left, m_offset, extent_full,    \
+                         rank)                                               \
+  } else {                                                                   \
+    TAGGED_LOOP_LAYOUT_5(tag, func, type, is_left, m_offset, extent_partial, \
+                         rank)                                               \
+  }
+
+#define TAGGED_TILE_LOOP_6(tag, func, type, is_left, cond, m_offset,         \
+                           extent_full, extent_partial, rank)                \
+  if (cond) {                                                                \
+    TAGGED_LOOP_LAYOUT_6(tag, func, type, is_left, m_offset, extent_full,    \
+                         rank)                                               \
+  } else {                                                                   \
+    TAGGED_LOOP_LAYOUT_6(tag, func, type, is_left, m_offset, extent_partial, \
+                         rank)                                               \
+  }
+
+#define TAGGED_TILE_LOOP_7(tag, func, type, is_left, cond, m_offset,         \
+                           extent_full, extent_partial, rank)                \
+  if (cond) {                                                                \
+    TAGGED_LOOP_LAYOUT_7(tag, func, type, is_left, m_offset, extent_full,    \
+                         rank)                                               \
+  } else {                                                                   \
+    TAGGED_LOOP_LAYOUT_7(tag, func, type, is_left, m_offset, extent_partial, \
+                         rank)                                               \
+  }
+
+#define TAGGED_TILE_LOOP_8(tag, func, type, is_left, cond, m_offset,         \
+                           extent_full, extent_partial, rank)                \
+  if (cond) {                                                                \
+    TAGGED_LOOP_LAYOUT_8(tag, func, type, is_left, m_offset, extent_full,    \
+                         rank)                                               \
+  } else {                                                                   \
+    TAGGED_LOOP_LAYOUT_8(tag, func, type, is_left, m_offset, extent_partial, \
+                         rank)                                               \
+  }
+
+// parallel_reduce, tagged
+// Reduction version
+#define TAGGED_APPLY_REDUX(val, tag, func, ...) func(tag, __VA_ARGS__, val);
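+// Editorial sketch: the tagged reduction combines both conventions,
+// tag first and reduction variable last, i.e. an (assumed) rank-2
+// functor
+//
+//   void operator()(const WorkTag&, index_type i0, index_type i1,
+//                   value_type& val) const;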
+
+// LayoutRight
+// d = 0 to start
+#define TAGGED_LOOP_R_1_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  KOKKOS_ENABLE_IVDEP_MDRANGE                                                 \
+  for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) {          \
+    TAGGED_APPLY_REDUX(val, tag, func, __VA_ARGS__, i0 + m_offset[d])         \
+  }
+
+#define TAGGED_LOOP_R_2_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) {          \
+    TAGGED_LOOP_R_1_REDUX(val, tag, func, type, m_offset, extent, d + 1,      \
+                          __VA_ARGS__, i1 + m_offset[d])                      \
+  }
+
+#define TAGGED_LOOP_R_3_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) {          \
+    TAGGED_LOOP_R_2_REDUX(val, tag, func, type, m_offset, extent, d + 1,      \
+                          __VA_ARGS__, i2 + m_offset[d])                      \
+  }
+
+#define TAGGED_LOOP_R_4_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) {          \
+    TAGGED_LOOP_R_3_REDUX(val, tag, func, type, m_offset, extent, d + 1,      \
+                          __VA_ARGS__, i3 + m_offset[d])                      \
+  }
+
+#define TAGGED_LOOP_R_5_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) {          \
+    TAGGED_LOOP_R_4_REDUX(val, tag, func, type, m_offset, extent, d + 1,      \
+                          __VA_ARGS__, i4 + m_offset[d])                      \
+  }
+
+#define TAGGED_LOOP_R_6_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) {          \
+    TAGGED_LOOP_R_5_REDUX(val, tag, func, type, m_offset, extent, d + 1,      \
+                          __VA_ARGS__, i5 + m_offset[d])                      \
+  }
+
+#define TAGGED_LOOP_R_7_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) {          \
+    TAGGED_LOOP_R_6_REDUX(val, tag, func, type, m_offset, extent, d + 1,      \
+                          __VA_ARGS__, i6 + m_offset[d])                      \
+  }
+
+#define TAGGED_LOOP_R_8_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) {          \
+    TAGGED_LOOP_R_7_REDUX(val, tag, func, type, m_offset, extent, d + 1,      \
+                          __VA_ARGS__, i7 + m_offset[d])                      \
+  }
+
+// LayoutLeft
+// d = rank-1 to start
+#define TAGGED_LOOP_L_1_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  KOKKOS_ENABLE_IVDEP_MDRANGE                                                 \
+  for (type i0 = (type)0; i0 < static_cast<type>(extent[d]); ++i0) {          \
+    TAGGED_APPLY_REDUX(val, tag, func, i0 + m_offset[d], __VA_ARGS__)         \
+  }
+
+#define TAGGED_LOOP_L_2_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i1 = (type)0; i1 < static_cast<type>(extent[d]); ++i1) {          \
+    TAGGED_LOOP_L_1_REDUX(val, tag, func, type, m_offset, extent, d - 1,      \
+                          i1 + m_offset[d], __VA_ARGS__)                      \
+  }
+
+#define TAGGED_LOOP_L_3_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i2 = (type)0; i2 < static_cast<type>(extent[d]); ++i2) {          \
+    TAGGED_LOOP_L_2_REDUX(val, tag, func, type, m_offset, extent, d - 1,      \
+                          i2 + m_offset[d], __VA_ARGS__)                      \
+  }
+
+#define TAGGED_LOOP_L_4_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i3 = (type)0; i3 < static_cast<type>(extent[d]); ++i3) {          \
+    TAGGED_LOOP_L_3_REDUX(val, tag, func, type, m_offset, extent, d - 1,      \
+                          i3 + m_offset[d], __VA_ARGS__)                      \
+  }
+
+#define TAGGED_LOOP_L_5_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i4 = (type)0; i4 < static_cast<type>(extent[d]); ++i4) {          \
+    TAGGED_LOOP_L_4_REDUX(val, tag, func, type, m_offset, extent, d - 1,      \
+                          i4 + m_offset[d], __VA_ARGS__)                      \
+  }
+
+#define TAGGED_LOOP_L_6_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i5 = (type)0; i5 < static_cast<type>(extent[d]); ++i5) {          \
+    TAGGED_LOOP_L_5_REDUX(val, tag, func, type, m_offset, extent, d - 1,      \
+                          i5 + m_offset[d], __VA_ARGS__)                      \
+  }
+
+#define TAGGED_LOOP_L_7_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i6 = (type)0; i6 < static_cast<type>(extent[d]); ++i6) {          \
+    TAGGED_LOOP_L_6_REDUX(val, tag, func, type, m_offset, extent, d - 1,      \
+                          i6 + m_offset[d], __VA_ARGS__)                      \
+  }
+
+#define TAGGED_LOOP_L_8_REDUX(val, tag, func, type, m_offset, extent, d, ...) \
+  for (type i7 = (type)0; i7 < static_cast<type>(extent[d]); ++i7) {          \
+    TAGGED_LOOP_L_7_REDUX(val, tag, func, type, m_offset, extent, d - 1,      \
+                          i7 + m_offset[d], __VA_ARGS__)                      \
+  }
+
+// Left vs Right
+#define TAGGED_LOOP_LAYOUT_1_REDUX(val, tag, func, type, is_left, m_offset, \
+                                   extent, rank)                            \
+  KOKKOS_ENABLE_IVDEP_MDRANGE                                               \
+  for (type i0 = (type)0; i0 < static_cast<type>(extent[0]); ++i0) {        \
+    TAGGED_APPLY_REDUX(val, tag, func, i0 + m_offset[0])                    \
+  }
+
+#define TAGGED_LOOP_LAYOUT_2_REDUX(val, tag, func, type, is_left, m_offset,   \
+                                   extent, rank)                              \
+  if (is_left) {                                                              \
+    for (type i1 = (type)0; i1 < static_cast<type>(extent[rank - 1]); ++i1) { \
+      TAGGED_LOOP_L_1_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
+                            i1 + m_offset[rank - 1])                          \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i1 = (type)0; i1 < static_cast<type>(extent[0]); ++i1) {        \
+      TAGGED_LOOP_R_1_REDUX(val, tag, func, type, m_offset, extent, 1,        \
+                            i1 + m_offset[0])                                 \
+    }                                                                         \
+  }
+
+#define TAGGED_LOOP_LAYOUT_3_REDUX(val, tag, func, type, is_left, m_offset,   \
+                                   extent, rank)                              \
+  if (is_left) {                                                              \
+    for (type i2 = (type)0; i2 < static_cast<type>(extent[rank - 1]); ++i2) { \
+      TAGGED_LOOP_L_2_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
+                            i2 + m_offset[rank - 1])                          \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i2 = (type)0; i2 < static_cast<type>(extent[0]); ++i2) {        \
+      TAGGED_LOOP_R_2_REDUX(val, tag, func, type, m_offset, extent, 1,        \
+                            i2 + m_offset[0])                                 \
+    }                                                                         \
+  }
+
+#define TAGGED_LOOP_LAYOUT_4_REDUX(val, tag, func, type, is_left, m_offset,   \
+                                   extent, rank)                              \
+  if (is_left) {                                                              \
+    for (type i3 = (type)0; i3 < static_cast<type>(extent[rank - 1]); ++i3) { \
+      TAGGED_LOOP_L_3_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
+                            i3 + m_offset[rank - 1])                          \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i3 = (type)0; i3 < static_cast<type>(extent[0]); ++i3) {        \
+      TAGGED_LOOP_R_3_REDUX(val, tag, func, type, m_offset, extent, 1,        \
+                            i3 + m_offset[0])                                 \
+    }                                                                         \
+  }
+
+#define TAGGED_LOOP_LAYOUT_5_REDUX(val, tag, func, type, is_left, m_offset,   \
+                                   extent, rank)                              \
+  if (is_left) {                                                              \
+    for (type i4 = (type)0; i4 < static_cast<type>(extent[rank - 1]); ++i4) { \
+      TAGGED_LOOP_L_4_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
+                            i4 + m_offset[rank - 1])                          \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i4 = (type)0; i4 < static_cast<type>(extent[0]); ++i4) {        \
+      TAGGED_LOOP_R_4_REDUX(val, tag, func, type, m_offset, extent, 1,        \
+                            i4 + m_offset[0])                                 \
+    }                                                                         \
+  }
+
+#define TAGGED_LOOP_LAYOUT_6_REDUX(val, tag, func, type, is_left, m_offset,   \
+                                   extent, rank)                              \
+  if (is_left) {                                                              \
+    for (type i5 = (type)0; i5 < static_cast<type>(extent[rank - 1]); ++i5) { \
+      TAGGED_LOOP_L_5_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
+                            i5 + m_offset[rank - 1])                          \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i5 = (type)0; i5 < static_cast<type>(extent[0]); ++i5) {        \
+      TAGGED_LOOP_R_5_REDUX(val, tag, func, type, m_offset, extent, 1,        \
+                            i5 + m_offset[0])                                 \
+    }                                                                         \
+  }
+
+#define TAGGED_LOOP_LAYOUT_7_REDUX(val, tag, func, type, is_left, m_offset,   \
+                                   extent, rank)                              \
+  if (is_left) {                                                              \
+    for (type i6 = (type)0; i6 < static_cast<type>(extent[rank - 1]); ++i6) { \
+      TAGGED_LOOP_L_6_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
+                            i6 + m_offset[rank - 1])                          \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i6 = (type)0; i6 < static_cast<type>(extent[0]); ++i6) {        \
+      TAGGED_LOOP_R_6_REDUX(val, tag, func, type, m_offset, extent, 1,        \
+                            i6 + m_offset[0])                                 \
+    }                                                                         \
+  }
+
+#define TAGGED_LOOP_LAYOUT_8_REDUX(val, tag, func, type, is_left, m_offset,   \
+                                   extent, rank)                              \
+  if (is_left) {                                                              \
+    for (type i7 = (type)0; i7 < static_cast<type>(extent[rank - 1]); ++i7) { \
+      TAGGED_LOOP_L_7_REDUX(val, tag, func, type, m_offset, extent, rank - 2, \
+                            i7 + m_offset[rank - 1])                          \
+    }                                                                         \
+  } else {                                                                    \
+    for (type i7 = (type)0; i7 < static_cast<type>(extent[0]); ++i7) {        \
+      TAGGED_LOOP_R_7_REDUX(val, tag, func, type, m_offset, extent, 1,        \
+                            i7 + m_offset[0])                                 \
+    }                                                                         \
+  }
+
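+// Each TAGGED_LOOP_LAYOUT_N_REDUX macro above emits one loop over the
+// outermost dimension and delegates the remaining dimensions to the
+// rank-(N-1) TAGGED_LOOP_{L,R}_*_REDUX macro: with is_left (Iterate::Left
+// inner iteration) the outer loop runs over extent[rank - 1] and recursion
+// walks toward dimension 0; otherwise it runs over extent[0] and walks
+// toward the last dimension.
+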
+// Partial vs Full Tile
+#define TAGGED_TILE_LOOP_1_REDUX(val, tag, func, type, is_left, cond,         \
+                                 m_offset, extent_full, extent_partial, rank) \
+  if (cond) {                                                                 \
+    TAGGED_LOOP_LAYOUT_1_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_full, rank)                             \
+  } else {                                                                    \
+    TAGGED_LOOP_LAYOUT_1_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_partial, rank)                          \
+  }
+
+#define TAGGED_TILE_LOOP_2_REDUX(val, tag, func, type, is_left, cond,         \
+                                 m_offset, extent_full, extent_partial, rank) \
+  if (cond) {                                                                 \
+    TAGGED_LOOP_LAYOUT_2_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_full, rank)                             \
+  } else {                                                                    \
+    TAGGED_LOOP_LAYOUT_2_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_partial, rank)                          \
+  }
+
+#define TAGGED_TILE_LOOP_3_REDUX(val, tag, func, type, is_left, cond,         \
+                                 m_offset, extent_full, extent_partial, rank) \
+  if (cond) {                                                                 \
+    TAGGED_LOOP_LAYOUT_3_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_full, rank)                             \
+  } else {                                                                    \
+    TAGGED_LOOP_LAYOUT_3_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_partial, rank)                          \
+  }
+
+#define TAGGED_TILE_LOOP_4_REDUX(val, tag, func, type, is_left, cond,         \
+                                 m_offset, extent_full, extent_partial, rank) \
+  if (cond) {                                                                 \
+    TAGGED_LOOP_LAYOUT_4_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_full, rank)                             \
+  } else {                                                                    \
+    TAGGED_LOOP_LAYOUT_4_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_partial, rank)                          \
+  }
+
+#define TAGGED_TILE_LOOP_5_REDUX(val, tag, func, type, is_left, cond,         \
+                                 m_offset, extent_full, extent_partial, rank) \
+  if (cond) {                                                                 \
+    TAGGED_LOOP_LAYOUT_5_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_full, rank)                             \
+  } else {                                                                    \
+    TAGGED_LOOP_LAYOUT_5_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_partial, rank)                          \
+  }
+
+#define TAGGED_TILE_LOOP_6_REDUX(val, tag, func, type, is_left, cond,         \
+                                 m_offset, extent_full, extent_partial, rank) \
+  if (cond) {                                                                 \
+    TAGGED_LOOP_LAYOUT_6_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_full, rank)                             \
+  } else {                                                                    \
+    TAGGED_LOOP_LAYOUT_6_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_partial, rank)                          \
+  }
+
+#define TAGGED_TILE_LOOP_7_REDUX(val, tag, func, type, is_left, cond,         \
+                                 m_offset, extent_full, extent_partial, rank) \
+  if (cond) {                                                                 \
+    TAGGED_LOOP_LAYOUT_7_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_full, rank)                             \
+  } else {                                                                    \
+    TAGGED_LOOP_LAYOUT_7_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_partial, rank)                          \
+  }
+
+#define TAGGED_TILE_LOOP_8_REDUX(val, tag, func, type, is_left, cond,         \
+                                 m_offset, extent_full, extent_partial, rank) \
+  if (cond) {                                                                 \
+    TAGGED_LOOP_LAYOUT_8_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_full, rank)                             \
+  } else {                                                                    \
+    TAGGED_LOOP_LAYOUT_8_REDUX(val, tag, func, type, is_left, m_offset,       \
+                               extent_partial, rank)                          \
+  }
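+
+// The TAGGED_TILE_LOOP_N_REDUX macros select the loop extents at runtime:
+// full (interior) tiles use the tile dimensions from the range policy
+// (extent_full), while boundary tiles use the clipped dimensions computed
+// by check_iteration_bounds (extent_partial); both branches expand the same
+// LOOP_LAYOUT macro, only the extent array differs.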
+
+// end tagged macros
+
+// Structs for calling loops
+template <int Rank, bool IsLeft, typename IType, typename Tagged,
+          typename Enable = void>
+struct Tile_Loop_Type;
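+
+// The primary template is only declared; the specializations below cover
+// ranks 1 through 8, first for untagged functors (Tagged = void), then
+// further down for tagged functors via enable_if.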
+
+template <bool IsLeft, typename IType>
+struct Tile_Loop_Type<1, IsLeft, IType, void, void> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_1(func, IType, IsLeft, cond, offset, a, b, 1);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_1_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 1);
+  }
+};
+
+template <bool IsLeft, typename IType>
+struct Tile_Loop_Type<2, IsLeft, IType, void, void> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_2(func, IType, IsLeft, cond, offset, a, b, 2);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_2_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 2);
+  }
+};
+
+template <bool IsLeft, typename IType>
+struct Tile_Loop_Type<3, IsLeft, IType, void, void> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_3(func, IType, IsLeft, cond, offset, a, b, 3);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_3_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 3);
+  }
+};
+
+template <bool IsLeft, typename IType>
+struct Tile_Loop_Type<4, IsLeft, IType, void, void> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_4(func, IType, IsLeft, cond, offset, a, b, 4);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_4_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 4);
+  }
+};
+
+template <bool IsLeft, typename IType>
+struct Tile_Loop_Type<5, IsLeft, IType, void, void> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_5(func, IType, IsLeft, cond, offset, a, b, 5);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_5_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 5);
+  }
+};
+
+template <bool IsLeft, typename IType>
+struct Tile_Loop_Type<6, IsLeft, IType, void, void> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_6(func, IType, IsLeft, cond, offset, a, b, 6);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_6_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 6);
+  }
+};
+
+template <bool IsLeft, typename IType>
+struct Tile_Loop_Type<7, IsLeft, IType, void, void> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_7(func, IType, IsLeft, cond, offset, a, b, 7);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_7_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 7);
+  }
+};
+
+template <bool IsLeft, typename IType>
+struct Tile_Loop_Type<8, IsLeft, IType, void, void> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_8(func, IType, IsLeft, cond, offset, a, b, 8);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TILE_LOOP_8_REDUX(value, func, IType, IsLeft, cond, offset, a, b, 8);
+  }
+};
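+
+// Illustrative use of the untagged dispatch above (a sketch only; `functor`,
+// `is_full_tile`, `offsets`, `full`, and `partial` are hypothetical names):
+//
+//   // Rank-2, Iterate::Right inner iteration, int64_t indices:
+//   Tile_Loop_Type<2, /*IsLeft=*/false, int64_t, void>::apply(
+//       functor, is_full_tile, offsets, full, partial);
+//
+// This expands TILE_LOOP_2, which loops over the extents in `full` when
+// is_full_tile is true and over `partial` otherwise, calling functor(i0, i1)
+// at every point.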
+
+// tagged versions
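+// These mirror the untagged specializations above, but pass a
+// default-constructed Tagged object through the TAGGED_* macros so the
+// functor is invoked with the work tag as its first argument,
+// e.g. func(Tagged(), i0, i1, ...).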
+
+template <bool IsLeft, typename IType, typename Tagged>
+struct Tile_Loop_Type<1, IsLeft, IType, Tagged,
+                      std::enable_if_t<!std::is_void<Tagged>::value>> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_1(Tagged(), func, IType, IsLeft, cond, offset, a, b, 1);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_1_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
+                             a, b, 1);
+  }
+};
+
+template <bool IsLeft, typename IType, typename Tagged>
+struct Tile_Loop_Type<2, IsLeft, IType, Tagged,
+                      std::enable_if_t<!std::is_void<Tagged>::value>> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_2(Tagged(), func, IType, IsLeft, cond, offset, a, b, 2);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_2_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
+                             a, b, 2);
+  }
+};
+
+template <bool IsLeft, typename IType, typename Tagged>
+struct Tile_Loop_Type<3, IsLeft, IType, Tagged,
+                      std::enable_if_t<!std::is_void<Tagged>::value>> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_3(Tagged(), func, IType, IsLeft, cond, offset, a, b, 3);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_3_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
+                             a, b, 3);
+  }
+};
+
+template <bool IsLeft, typename IType, typename Tagged>
+struct Tile_Loop_Type<4, IsLeft, IType, Tagged,
+                      std::enable_if_t<!std::is_void<Tagged>::value>> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_4(Tagged(), func, IType, IsLeft, cond, offset, a, b, 4);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_4_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
+                             a, b, 4);
+  }
+};
+
+template <bool IsLeft, typename IType, typename Tagged>
+struct Tile_Loop_Type<5, IsLeft, IType, Tagged,
+                      std::enable_if_t<!std::is_void<Tagged>::value>> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_5(Tagged(), func, IType, IsLeft, cond, offset, a, b, 5);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_5_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
+                             a, b, 5);
+  }
+};
+
+template <bool IsLeft, typename IType, typename Tagged>
+struct Tile_Loop_Type<6, IsLeft, IType, Tagged,
+                      std::enable_if_t<!std::is_void<Tagged>::value>> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_6(Tagged(), func, IType, IsLeft, cond, offset, a, b, 6);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_6_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
+                             a, b, 6);
+  }
+};
+
+template <bool IsLeft, typename IType, typename Tagged>
+struct Tile_Loop_Type<7, IsLeft, IType, Tagged,
+                      std::enable_if_t<!std::is_void<Tagged>::value>> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_7(Tagged(), func, IType, IsLeft, cond, offset, a, b, 7);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_7_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
+                             a, b, 7);
+  }
+};
+
+template <bool IsLeft, typename IType, typename Tagged>
+struct Tile_Loop_Type<8, IsLeft, IType, Tagged,
+                      std::enable_if_t<!std::is_void<Tagged>::value>> {
+  template <typename Func, typename Offset, typename ExtentA, typename ExtentB>
+  static void apply(Func const& func, bool cond, Offset const& offset,
+                    ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_8(Tagged(), func, IType, IsLeft, cond, offset, a, b, 8);
+  }
+
+  template <typename ValType, typename Func, typename Offset, typename ExtentA,
+            typename ExtentB>
+  static void apply(ValType& value, Func const& func, bool cond,
+                    Offset const& offset, ExtentA const& a, ExtentB const& b) {
+    TAGGED_TILE_LOOP_8_REDUX(value, Tagged(), func, IType, IsLeft, cond, offset,
+                             a, b, 8);
+  }
+};
+// end Structs for calling loops
+
+template <typename RP, typename Functor, typename Tag = void,
+          typename ValueType = void, typename Enable = void>
+struct HostIterateTile;
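+
+// Only declared here; three partial specializations follow:
+//   ValueType = void      -> ParallelFor,
+//   scalar ValueType      -> ParallelReduce,
+//   array ValueType (T[]) -> ParallelReduce with array reductions.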
+
+// For ParallelFor
+template <typename RP, typename Functor, typename Tag, typename ValueType>
+struct HostIterateTile<RP, Functor, Tag, ValueType,
+                       std::enable_if_t<std::is_void<ValueType>::value>> {
+  using index_type = typename RP::index_type;
+  using point_type = typename RP::point_type;
+
+  using value_type = ValueType;
+
+  inline HostIterateTile(RP const& rp, Functor const& func)
+      : m_rp(rp), m_func(func) {}
+
+  inline bool check_iteration_bounds(point_type& partial_tile,
+                                     point_type& offset) const {
+    bool is_full_tile = true;
+
+    for (int i = 0; i < RP::rank; ++i) {
+      if ((offset[i] + m_rp.m_tile[i]) <= m_rp.m_upper[i]) {
+        partial_tile[i] = m_rp.m_tile[i];
+      } else {
+        is_full_tile = false;
+        partial_tile[i] =
+            (m_rp.m_upper[i] - 1 - offset[i]) == 0
+                ? 1
+                : (m_rp.m_upper[i] - m_rp.m_tile[i]) > 0
+                      ? (m_rp.m_upper[i] - offset[i])
+                      : (m_rp.m_upper[i] -
+                         m_rp.m_lower[i]);  // when single tile encloses range
+      }
+    }
+
+    return is_full_tile;
+  }  // end check bounds
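+
+  // Worked example (hypothetical numbers): with m_lower[i] = 0,
+  // m_upper[i] = 10, m_tile[i] = 4, a tile starting at offset[i] = 8 yields
+  // partial_tile[i] = 10 - 8 = 2; a single tile of 16 covering the whole
+  // range clamps to m_upper[i] - m_lower[i] = 10; an offset of 9 (the last
+  // point) yields 1.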
+
+  template <int Rank>
+  struct RankTag {
+    using type = RankTag<Rank>;
+    enum { value = (int)Rank };
+  };
+
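+  // Two implementations of operator() follow: with the new loop macros
+  // enabled the rank-generic Tile_Loop_Type dispatch above is used;
+  // otherwise a per-rank operator_impl is selected via the RankTag type.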
+#if KOKKOS_ENABLE_NEW_LOOP_MACROS
+  template <typename IType>
+  inline void operator()(IType tile_idx) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
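+    // tile_idx is decoded like a mixed-radix number. Hypothetical example
+    // for the Iterate::Left branch: m_tile_end = {3, 4}, m_tile = {2, 5},
+    // m_lower = {0, 0}, tile_idx = 7 gives
+    // m_offset = {(7 % 3) * 2, (7 / 3 % 4) * 5} = {2, 10}.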
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    Tile_Loop_Type<RP::rank, (RP::inner_direction == Iterate::Left), index_type,
+                   Tag>::apply(m_func, full_tile, m_offset, m_rp.m_tile,
+                               m_tiledims);
+  }
+
+#else
+  template <typename IType>
+  inline void operator()(IType tile_idx) const {
+    operator_impl(tile_idx, RankTag<RP::rank>());
+  }
+  // Added due to a compiler error when using SFINAE to choose the operator
+  // overload based on rank with CUDA+Serial builds.
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<2>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+      } else {
+        //      #pragma simd
+        LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+      } else {
+        //      #pragma simd
+        LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+      }
+    }  // end Iterate::Right
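+    // Note: the full_tile and partial branches expand to identical loops
+    // (m_tiledims already holds the correct extents in either case); the
+    // split is kept as a placeholder for full-tile-only optimizations such
+    // as the commented-out '#pragma simd'. The same applies to the ranks
+    // below.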
+
+  }  // end op() rank == 2
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<3>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+      } else {
+        //      #pragma simd
+        LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+      } else {
+        //      #pragma simd
+        LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 3
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<4>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+      } else {
+        //      #pragma simd
+        LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+      } else {
+        //      #pragma simd
+        LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 4
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<5>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+      } else {
+        //      #pragma simd
+        LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+      } else {
+        //      #pragma simd
+        LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 5
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<6>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+      } else {
+        //      #pragma simd
+        LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+      } else {
+        //      #pragma simd
+        LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 6
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<7>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+      } else {
+        //      #pragma simd
+        LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+      } else {
+        //      #pragma simd
+        LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 7
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<8>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+      } else {
+        //      #pragma simd
+        LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+      } else {
+        //      #pragma simd
+        LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 8
+#endif
+
+  template <typename... Args>
+  std::enable_if_t<(sizeof...(Args) == RP::rank && std::is_void<Tag>::value),
+                   void>
+  apply(Args&&... args) const {
+    m_func(args...);
+  }
+
+  template <typename... Args>
+  std::enable_if_t<(sizeof...(Args) == RP::rank && !std::is_void<Tag>::value),
+                   void>
+  apply(Args&&... args) const {
+    m_func(m_tag, args...);
+  }
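+
+  // The two apply overloads are selected by SFINAE on Tag: untagged functors
+  // receive the indices directly, tagged functors receive the stored tag
+  // first; sizeof...(Args) == RP::rank restricts both to full index packs.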
+
+  RP const& m_rp;
+  Functor const& m_func;
+  std::conditional_t<std::is_void<Tag>::value, int, Tag> m_tag;
+};
+
+// For ParallelReduce
+// ValueType - scalar: For reductions
+template <typename RP, typename Functor, typename Tag, typename ValueType>
+struct HostIterateTile<RP, Functor, Tag, ValueType,
+                       std::enable_if_t<!std::is_void<ValueType>::value &&
+                                        !std::is_array<ValueType>::value>> {
+  using index_type = typename RP::index_type;
+  using point_type = typename RP::point_type;
+
+  using value_type = ValueType;
+
+  inline HostIterateTile(RP const& rp, Functor const& func, value_type& v)
+      : m_rp(rp)  // CUDA 7.0 does not like braces...
+        ,
+        m_func(func),
+        m_v(v)  // use with non-void ValueType struct
+  {
+    // Errors due to braces rather than parentheses for init (with CUDA 7.0):
+    //      /home/ndellin/kokkos/core/src/impl/KokkosExp_Host_IterateTile.hpp:1216:98:
+    //      error: too many braces around initializer for 'int' [-fpermissive]
+    //      /home/ndellin/kokkos/core/src/impl/KokkosExp_Host_IterateTile.hpp:1216:98:
+    //      error: aggregate value used where an integer was expected
+  }
+
+  inline bool check_iteration_bounds(point_type& partial_tile,
+                                     point_type& offset) const {
+    bool is_full_tile = true;
+
+    for (int i = 0; i < RP::rank; ++i) {
+      if ((offset[i] + m_rp.m_tile[i]) <= m_rp.m_upper[i]) {
+        partial_tile[i] = m_rp.m_tile[i];
+      } else {
+        is_full_tile = false;
+        partial_tile[i] =
+            (m_rp.m_upper[i] - 1 - offset[i]) == 0
+                ? 1
+                : (m_rp.m_upper[i] - m_rp.m_tile[i]) > 0
+                      ? (m_rp.m_upper[i] - offset[i])
+                      : (m_rp.m_upper[i] -
+                         m_rp.m_lower[i]);  // when single tile encloses range
+      }
+    }
+
+    return is_full_tile;
+  }  // end check bounds
+
+  template <int Rank>
+  struct RankTag {
+    using type = RankTag<Rank>;
+    enum { value = (int)Rank };
+  };
+
+#if KOKKOS_ENABLE_NEW_LOOP_MACROS
+  template <typename IType>
+  inline void operator()(IType tile_idx) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    Tile_Loop_Type<RP::rank, (RP::inner_direction == Iterate::Left), index_type,
+                   Tag>::apply(m_v, m_func, full_tile, m_offset, m_rp.m_tile,
+                               m_tiledims);
+  }
+
+#else
+  template <typename IType>
+  inline void operator()(IType tile_idx) const {
+    operator_impl(tile_idx, RankTag<RP::rank>());
+  }
+  // Added due to a compiler error when using SFINAE to choose the operator
+  // overload based on rank.
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<2>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+      } else {
+        //      #pragma simd
+        LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+      } else {
+        //      #pragma simd
+        LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 2
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<3>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+      } else {
+        //      #pragma simd
+        LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+      } else {
+        //      #pragma simd
+        LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 3
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<4>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+      } else {
+        //      #pragma simd
+        LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+      } else {
+        //      #pragma simd
+        LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 4
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<5>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+      } else {
+        //      #pragma simd
+        LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+      } else {
+        //      #pragma simd
+        LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 5
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<6>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+      } else {
+        //      #pragma simd
+        LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+      } else {
+        //      #pragma simd
+        LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 6
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<7>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+      } else {
+        //      #pragma simd
+        LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+      } else {
+        //      #pragma simd
+        LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 7
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<8>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+      } else {
+        //      #pragma simd
+        LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+      } else {
+        //      #pragma simd
+        LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 8
+#endif
+
+  template <typename... Args>
+  std::enable_if_t<(sizeof...(Args) == RP::rank && std::is_void<Tag>::value),
+                   void>
+  apply(Args&&... args) const {
+    m_func(args..., m_v);
+  }
+
+  template <typename... Args>
+  std::enable_if_t<(sizeof...(Args) == RP::rank && !std::is_void<Tag>::value),
+                   void>
+  apply(Args&&... args) const {
+    m_func(m_tag, args..., m_v);
+  }
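+
+  // Same dispatch as in the ParallelFor specialization, except the reduction
+  // value m_v is appended as the functor's final argument.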
+
+  RP const& m_rp;
+  Functor const& m_func;
+  value_type& m_v;
+  std::conditional_t<std::is_void<Tag>::value, int, Tag> m_tag;
+};
+
+// For ParallelReduce
+// Extra specialization for array reductions
+// ValueType[]: For array reductions
+template <typename RP, typename Functor, typename Tag, typename ValueType>
+struct HostIterateTile<RP, Functor, Tag, ValueType,
+                       std::enable_if_t<!std::is_void<ValueType>::value &&
+                                        std::is_array<ValueType>::value>> {
+  using index_type = typename RP::index_type;
+  using point_type = typename RP::point_type;
+
+  using value_type =
+      std::remove_extent_t<ValueType>;  // strip away the
+                                        // 'array-ness' [], only
+                                        // underlying type remains
+
+  inline HostIterateTile(
+      RP const& rp, Functor const& func,
+      value_type* v)  // v should be an array; treat as pointer for
+                      // compatibility since size is not known nor needed here
+      : m_rp(rp)      // CUDA 7.0 does not like braces...
+        ,
+        m_func(func),
+        m_v(v)  // use with non-void ValueType struct
+  {}
+
+  inline bool check_iteration_bounds(point_type& partial_tile,
+                                     point_type& offset) const {
+    bool is_full_tile = true;
+
+    for (int i = 0; i < RP::rank; ++i) {
+      if ((offset[i] + m_rp.m_tile[i]) <= m_rp.m_upper[i]) {
+        partial_tile[i] = m_rp.m_tile[i];
+      } else {
+        is_full_tile = false;
+        partial_tile[i] =
+            (m_rp.m_upper[i] - 1 - offset[i]) == 0
+                ? 1
+                : (m_rp.m_upper[i] - m_rp.m_tile[i]) > 0
+                      ? (m_rp.m_upper[i] - offset[i])
+                      : (m_rp.m_upper[i] -
+                         m_rp.m_lower[i]);  // when single tile encloses range
+      }
+    }
+
+    return is_full_tile;
+  }  // end check bounds
+
+  template <int Rank>
+  struct RankTag {
+    using type = RankTag<Rank>;
+    enum { value = (int)Rank };
+  };
+
+#if KOKKOS_ENABLE_NEW_LOOP_MACROS
+  template <typename IType>
+  inline void operator()(IType tile_idx) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    Tile_Loop_Type<RP::rank, (RP::inner_direction == Iterate::Left), index_type,
+                   Tag>::apply(m_v, m_func, full_tile, m_offset, m_rp.m_tile,
+                               m_tiledims);
+  }
+
+#else
+  template <typename IType>
+  inline void operator()(IType tile_idx) const {
+    operator_impl(tile_idx, RankTag<RP::rank>());
+  }
+  // Added due to a compiler error when using SFINAE to choose the operator
+  // overload based on rank.
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<2>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+      } else {
+        //      #pragma simd
+        LOOP_2L(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+      } else {
+        //      #pragma simd
+        LOOP_2R(index_type, m_tiledims) { apply(LOOP_ARGS_2); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 2
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<3>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+      } else {
+        //      #pragma simd
+        LOOP_3L(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+      } else {
+        //      #pragma simd
+        LOOP_3R(index_type, m_tiledims) { apply(LOOP_ARGS_3); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 3
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<4>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+      } else {
+        //      #pragma simd
+        LOOP_4L(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+      } else {
+        //      #pragma simd
+        LOOP_4R(index_type, m_tiledims) { apply(LOOP_ARGS_4); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 4
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<5>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+      } else {
+        //      #pragma simd
+        LOOP_5L(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+      } else {
+        //      #pragma simd
+        LOOP_5R(index_type, m_tiledims) { apply(LOOP_ARGS_5); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 5
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<6>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+      } else {
+        //      #pragma simd
+        LOOP_6L(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+      } else {
+        //      #pragma simd
+        LOOP_6R(index_type, m_tiledims) { apply(LOOP_ARGS_6); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 6
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<7>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+      } else {
+        //      #pragma simd
+        LOOP_7L(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+      } else {
+        //      #pragma simd
+        LOOP_7R(index_type, m_tiledims) { apply(LOOP_ARGS_7); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 7
+
+  template <typename IType>
+  inline void operator_impl(IType tile_idx, const RankTag<8>) const {
+    point_type m_offset;
+    point_type m_tiledims;
+
+    if (RP::outer_direction == Iterate::Left) {
+      for (int i = 0; i < RP::rank; ++i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    } else {
+      for (int i = RP::rank - 1; i >= 0; --i) {
+        m_offset[i] =
+            (tile_idx % m_rp.m_tile_end[i]) * m_rp.m_tile[i] + m_rp.m_lower[i];
+        tile_idx /= m_rp.m_tile_end[i];
+      }
+    }
+
+    // Check if offset+tiledim in bounds - if not, replace tile dims with the
+    // partial tile dims
+    const bool full_tile = check_iteration_bounds(m_tiledims, m_offset);
+
+    if (RP::inner_direction == Iterate::Left) {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+      } else {
+        //      #pragma simd
+        LOOP_8L(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+      }
+    }  // end Iterate::Left
+    else {
+      if (full_tile) {
+        //      #pragma simd
+        LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+      } else {
+        //      #pragma simd
+        LOOP_8R(index_type, m_tiledims) { apply(LOOP_ARGS_8); }
+      }
+    }  // end Iterate::Right
+
+  }  // end op() rank == 8
+#endif
+
+  template <typename... Args>
+  std::enable_if_t<(sizeof...(Args) == RP::rank && std::is_void<Tag>::value),
+                   void>
+  apply(Args&&... args) const {
+    m_func(args..., m_v);
+  }
+
+  template <typename... Args>
+  std::enable_if_t<(sizeof...(Args) == RP::rank && !std::is_void<Tag>::value),
+                   void>
+  apply(Args&&... args) const {
+    m_func(m_tag, args..., m_v);
+  }
+
+  RP const& m_rp;
+  Functor const& m_func;
+  value_type* m_v;
+  std::conditional_t<std::is_void<Tag>::value, int, Tag> m_tag;
+};
+
+// ------------------------------------------------------------------ //
+
+#undef KOKKOS_ENABLE_NEW_LOOP_MACROS
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/KokkosExp_IterateTileGPU.hpp b/bundled/kokkos-3.7.00/core/src/impl/KokkosExp_IterateTileGPU.hpp
new file mode 100644 (file)
index 0000000..957c3b6
--- /dev/null
@@ -0,0 +1,1038 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_EXP_ITERATE_TILE_GPU_HPP
+#define KOKKOS_EXP_ITERATE_TILE_GPU_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <algorithm>
+
+#include <utility>
+
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <typeinfo>
+
+namespace Kokkos {
+namespace Impl {
+
+#ifdef KOKKOS_ENABLE_SYCL
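+// SYCL provides no implicit gridDim/blockIdx/threadIdx globals, so this dim3
+// look-alike lets the iteration structs below receive the launch geometry
+// explicitly (see the SYCL constructor overloads taking EmulateCUDADim3).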
+template <typename index_type>
+struct EmulateCUDADim3 {
+  index_type x;
+  index_type y;
+  index_type z;
+};
+#endif
+
+template <class Tag, class Functor, class... Args>
+KOKKOS_IMPL_FORCEINLINE_FUNCTION std::enable_if_t<std::is_void<Tag>::value>
+_tag_invoke(Functor const& f, Args&&... args) {
+  f((Args &&) args...);
+}
+
+template <class Tag, class Functor, class... Args>
+KOKKOS_IMPL_FORCEINLINE_FUNCTION std::enable_if_t<!std::is_void<Tag>::value>
+_tag_invoke(Functor const& f, Args&&... args) {
+  f(Tag{}, (Args &&) args...);
+}
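+
+// The two _tag_invoke overloads above implement the usual Kokkos work-tag
+// dispatch. A minimal sketch of the effect (editorial; MyTag and MyFunctor
+// are hypothetical names, not part of this header):
+//   struct MyTag {};
+//   struct MyFunctor {
+//     KOKKOS_FUNCTION void operator()(MyTag, int i, int j) const { /*...*/ }
+//   };
+//   // _tag_invoke<void> (f, i, j) calls f(i, j)
+//   // _tag_invoke<MyTag>(f, i, j) calls f(MyTag{}, i, j)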
+
+template <class Tag, class Functor, class T, size_t N, size_t... Idxs,
+          class... Args>
+KOKKOS_IMPL_FORCEINLINE_FUNCTION void _tag_invoke_array_helper(
+    Functor const& f, T (&vals)[N], std::integer_sequence<size_t, Idxs...>,
+    Args&&... args) {
+  _tag_invoke<Tag>(f, vals[Idxs]..., (Args &&) args...);
+}
+
+template <class Tag, class Functor, class T, size_t N, class... Args>
+KOKKOS_IMPL_FORCEINLINE_FUNCTION void _tag_invoke_array(Functor const& f,
+                                                        T (&vals)[N],
+                                                        Args&&... args) {
+  _tag_invoke_array_helper<Tag>(f, vals, std::make_index_sequence<N>{},
+                                (Args &&) args...);
+}
+
+// ------------------------------------------------------------------ //
+// ParallelFor iteration pattern
+template <int N, typename PolicyType, typename Functor, typename Tag>
+struct DeviceIterateTile;
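+
+// Specialized below for ranks 2 through 6. Each specialization maps the (up
+// to) three hardware block/thread dimensions onto the N tile dimensions; for
+// rank > 3, two tile dimensions share a single hardware dimension via the
+// numbl*/tile_id*/thr_id* arithmetic.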
+
+// Rank 2
+template <typename PolicyType, typename Functor, typename Tag>
+struct DeviceIterateTile<2, PolicyType, Functor, Tag> {
+  using index_type = typename PolicyType::index_type;
+
+#ifdef KOKKOS_ENABLE_SYCL
+  KOKKOS_IMPL_DEVICE_FUNCTION DeviceIterateTile(
+      const PolicyType& policy_, const Functor& f_,
+      const EmulateCUDADim3<index_type> gridDim_,
+      const EmulateCUDADim3<index_type> blockIdx_,
+      const EmulateCUDADim3<index_type> threadIdx_)
+      : m_policy(policy_),
+        m_func(f_),
+        gridDim(gridDim_),
+        blockIdx(blockIdx_),
+        threadIdx(threadIdx_) {}
+#else
+  KOKKOS_IMPL_DEVICE_FUNCTION DeviceIterateTile(const PolicyType& policy_,
+                                                const Functor& f_)
+      : m_policy(policy_), m_func(f_) {}
+#endif
+
+  KOKKOS_IMPL_DEVICE_FUNCTION
+  void exec_range() const {
+    if (PolicyType::inner_direction == Iterate::Left) {
+      // Loop over the tiles, striding by the grid size (at most the maximum
+      // number of blocks), until the full range is covered
+      for (index_type tile_id1 = static_cast<index_type>(blockIdx.y);
+           tile_id1 < m_policy.m_tile_end[1]; tile_id1 += gridDim.y) {
+        const index_type offset_1 =
+            tile_id1 * m_policy.m_tile[1] +
+            static_cast<index_type>(threadIdx.y) +
+            static_cast<index_type>(m_policy.m_lower[1]);
+        if (offset_1 < m_policy.m_upper[1] &&
+            static_cast<index_type>(threadIdx.y) < m_policy.m_tile[1]) {
+          for (index_type tile_id0 = static_cast<index_type>(blockIdx.x);
+               tile_id0 < m_policy.m_tile_end[0]; tile_id0 += gridDim.x) {
+            const index_type offset_0 =
+                tile_id0 * m_policy.m_tile[0] +
+                static_cast<index_type>(threadIdx.x) +
+                static_cast<index_type>(m_policy.m_lower[0]);
+            if (offset_0 < m_policy.m_upper[0] &&
+                static_cast<index_type>(threadIdx.x) < m_policy.m_tile[0]) {
+              Impl::_tag_invoke<Tag>(m_func, offset_0, offset_1);
+            }
+          }
+        }
+      }
+    } else {
+      for (index_type tile_id0 = static_cast<index_type>(blockIdx.x);
+           tile_id0 < m_policy.m_tile_end[0]; tile_id0 += gridDim.x) {
+        const index_type offset_0 =
+            tile_id0 * m_policy.m_tile[0] +
+            static_cast<index_type>(threadIdx.x) +
+            static_cast<index_type>(m_policy.m_lower[0]);
+        if (offset_0 < m_policy.m_upper[0] &&
+            static_cast<index_type>(threadIdx.x) < m_policy.m_tile[0]) {
+          for (index_type tile_id1 = static_cast<index_type>(blockIdx.y);
+               tile_id1 < m_policy.m_tile_end[1]; tile_id1 += gridDim.y) {
+            const index_type offset_1 =
+                tile_id1 * m_policy.m_tile[1] +
+                static_cast<index_type>(threadIdx.y) +
+                static_cast<index_type>(m_policy.m_lower[1]);
+            if (offset_1 < m_policy.m_upper[1] &&
+                static_cast<index_type>(threadIdx.y) < m_policy.m_tile[1]) {
+              Impl::_tag_invoke<Tag>(m_func, offset_0, offset_1);
+            }
+          }
+        }
+      }
+    }
+  }  // end exec_range
+
+ private:
+  const PolicyType& m_policy;
+  const Functor& m_func;
+#ifdef KOKKOS_ENABLE_SYCL
+  const EmulateCUDADim3<index_type> gridDim;
+  const EmulateCUDADim3<index_type> blockIdx;
+  const EmulateCUDADim3<index_type> threadIdx;
+#endif
+};
+
+// Rank 3
+template <typename PolicyType, typename Functor, typename Tag>
+struct DeviceIterateTile<3, PolicyType, Functor, Tag> {
+  using index_type = typename PolicyType::index_type;
+
+#ifdef KOKKOS_ENABLE_SYCL
+  KOKKOS_IMPL_DEVICE_FUNCTION DeviceIterateTile(
+      const PolicyType& policy_, const Functor& f_,
+      const EmulateCUDADim3<index_type> gridDim_,
+      const EmulateCUDADim3<index_type> blockIdx_,
+      const EmulateCUDADim3<index_type> threadIdx_)
+      : m_policy(policy_),
+        m_func(f_),
+        gridDim(gridDim_),
+        blockIdx(blockIdx_),
+        threadIdx(threadIdx_) {}
+#else
+  KOKKOS_IMPL_DEVICE_FUNCTION DeviceIterateTile(const PolicyType& policy_,
+                                                const Functor& f_)
+      : m_policy(policy_), m_func(f_) {}
+#endif
+
+  KOKKOS_IMPL_DEVICE_FUNCTION
+  void exec_range() const {
+    if (PolicyType::inner_direction == Iterate::Left) {
+      for (index_type tile_id2 = static_cast<index_type>(blockIdx.z);
+           tile_id2 < m_policy.m_tile_end[2]; tile_id2 += gridDim.z) {
+        const index_type offset_2 =
+            tile_id2 * m_policy.m_tile[2] +
+            static_cast<index_type>(threadIdx.z) +
+            static_cast<index_type>(m_policy.m_lower[2]);
+        if (offset_2 < m_policy.m_upper[2] &&
+            static_cast<index_type>(threadIdx.z) < m_policy.m_tile[2]) {
+          for (index_type tile_id1 = static_cast<index_type>(blockIdx.y);
+               tile_id1 < m_policy.m_tile_end[1]; tile_id1 += gridDim.y) {
+            const index_type offset_1 =
+                tile_id1 * m_policy.m_tile[1] +
+                static_cast<index_type>(threadIdx.y) +
+                static_cast<index_type>(m_policy.m_lower[1]);
+            if (offset_1 < m_policy.m_upper[1] &&
+                static_cast<index_type>(threadIdx.y) < m_policy.m_tile[1]) {
+              for (index_type tile_id0 = static_cast<index_type>(blockIdx.x);
+                   tile_id0 < m_policy.m_tile_end[0]; tile_id0 += gridDim.x) {
+                const index_type offset_0 =
+                    tile_id0 * m_policy.m_tile[0] +
+                    static_cast<index_type>(threadIdx.x) +
+                    static_cast<index_type>(m_policy.m_lower[0]);
+                if (offset_0 < m_policy.m_upper[0] &&
+                    static_cast<index_type>(threadIdx.x) < m_policy.m_tile[0]) {
+                  Impl::_tag_invoke<Tag>(m_func, offset_0, offset_1, offset_2);
+                }
+              }
+            }
+          }
+        }
+      }
+    } else {
+      for (index_type tile_id0 = static_cast<index_type>(blockIdx.x);
+           tile_id0 < m_policy.m_tile_end[0]; tile_id0 += gridDim.x) {
+        const index_type offset_0 =
+            tile_id0 * m_policy.m_tile[0] +
+            static_cast<index_type>(threadIdx.x) +
+            static_cast<index_type>(m_policy.m_lower[0]);
+        if (offset_0 < m_policy.m_upper[0] &&
+            static_cast<index_type>(threadIdx.x) < m_policy.m_tile[0]) {
+          for (index_type tile_id1 = static_cast<index_type>(blockIdx.y);
+               tile_id1 < m_policy.m_tile_end[1]; tile_id1 += gridDim.y) {
+            const index_type offset_1 =
+                tile_id1 * m_policy.m_tile[1] +
+                static_cast<index_type>(threadIdx.y) +
+                static_cast<index_type>(m_policy.m_lower[1]);
+            if (offset_1 < m_policy.m_upper[1] &&
+                static_cast<index_type>(threadIdx.y) < m_policy.m_tile[1]) {
+              for (index_type tile_id2 = static_cast<index_type>(blockIdx.z);
+                   tile_id2 < m_policy.m_tile_end[2]; tile_id2 += gridDim.z) {
+                const index_type offset_2 =
+                    tile_id2 * m_policy.m_tile[2] +
+                    static_cast<index_type>(threadIdx.z) +
+                    static_cast<index_type>(m_policy.m_lower[2]);
+                if (offset_2 < m_policy.m_upper[2] &&
+                    static_cast<index_type>(threadIdx.z) < m_policy.m_tile[2]) {
+                  Impl::_tag_invoke<Tag>(m_func, offset_0, offset_1, offset_2);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }  // end exec_range
+
+ private:
+  const PolicyType& m_policy;
+  const Functor& m_func;
+#ifdef KOKKOS_ENABLE_SYCL
+  const EmulateCUDADim3<index_type> gridDim;
+  const EmulateCUDADim3<index_type> blockIdx;
+  const EmulateCUDADim3<index_type> threadIdx;
+#endif
+};
+
+// Rank 4
+template <typename PolicyType, typename Functor, typename Tag>
+struct DeviceIterateTile<4, PolicyType, Functor, Tag> {
+  using index_type = typename PolicyType::index_type;
+
+#ifdef KOKKOS_ENABLE_SYCL
+  KOKKOS_IMPL_DEVICE_FUNCTION DeviceIterateTile(
+      const PolicyType& policy_, const Functor& f_,
+      const EmulateCUDADim3<index_type> gridDim_,
+      const EmulateCUDADim3<index_type> blockIdx_,
+      const EmulateCUDADim3<index_type> threadIdx_)
+      : m_policy(policy_),
+        m_func(f_),
+        gridDim(gridDim_),
+        blockIdx(blockIdx_),
+        threadIdx(threadIdx_) {}
+#else
+  KOKKOS_IMPL_DEVICE_FUNCTION DeviceIterateTile(const PolicyType& policy_,
+                                                const Functor& f_)
+      : m_policy(policy_), m_func(f_) {}
+#endif
+
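+  // Editorial note: 65535 matches the CUDA hardware limit on gridDim.y and
+  // gridDim.z, which is presumably why the per-dimension block counts
+  // computed below are capped at this value.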
+  static constexpr index_type max_blocks = 65535;
+
+  KOKKOS_IMPL_DEVICE_FUNCTION
+  void exec_range() const {
+    if (PolicyType::inner_direction == Iterate::Left) {
+      const index_type temp0  = m_policy.m_tile_end[0];
+      const index_type temp1  = m_policy.m_tile_end[1];
+      const index_type numbl0 = (temp0 <= max_blocks ? temp0 : max_blocks);
+      const index_type numbl1 =
+          (temp0 * temp1 > max_blocks
+               ? static_cast<index_type>(max_blocks / numbl0)
+               : (temp1 <= max_blocks ? temp1 : max_blocks));
+
+      const index_type tile_id0 = static_cast<index_type>(blockIdx.x) % numbl0;
+      const index_type tile_id1 = static_cast<index_type>(blockIdx.x) / numbl0;
+      const index_type thr_id0 =
+          static_cast<index_type>(threadIdx.x) % m_policy.m_tile[0];
+      const index_type thr_id1 =
+          static_cast<index_type>(threadIdx.x) / m_policy.m_tile[0];
+
+      for (index_type tile_id3 = static_cast<index_type>(blockIdx.z);
+           tile_id3 < m_policy.m_tile_end[3]; tile_id3 += gridDim.z) {
+        const index_type offset_3 =
+            tile_id3 * m_policy.m_tile[3] +
+            static_cast<index_type>(threadIdx.z) +
+            static_cast<index_type>(m_policy.m_lower[3]);
+        if (offset_3 < m_policy.m_upper[3] &&
+            static_cast<index_type>(threadIdx.z) < m_policy.m_tile[3]) {
+          for (index_type tile_id2 = static_cast<index_type>(blockIdx.y);
+               tile_id2 < m_policy.m_tile_end[2]; tile_id2 += gridDim.y) {
+            const index_type offset_2 =
+                tile_id2 * m_policy.m_tile[2] +
+                static_cast<index_type>(threadIdx.y) +
+                static_cast<index_type>(m_policy.m_lower[2]);
+            if (offset_2 < m_policy.m_upper[2] &&
+                static_cast<index_type>(threadIdx.y) < m_policy.m_tile[2]) {
+              for (index_type j = tile_id1; j < m_policy.m_tile_end[1];
+                   j += numbl1) {
+                const index_type offset_1 =
+                    j * m_policy.m_tile[1] + thr_id1 +
+                    static_cast<index_type>(m_policy.m_lower[1]);
+                if (offset_1 < m_policy.m_upper[1] &&
+                    thr_id1 < m_policy.m_tile[1]) {
+                  for (index_type i = tile_id0; i < m_policy.m_tile_end[0];
+                       i += numbl0) {
+                    const index_type offset_0 =
+                        i * m_policy.m_tile[0] + thr_id0 +
+                        static_cast<index_type>(m_policy.m_lower[0]);
+                    if (offset_0 < m_policy.m_upper[0] &&
+                        thr_id0 < m_policy.m_tile[0]) {
+                      Impl::_tag_invoke<Tag>(m_func, offset_0, offset_1,
+                                             offset_2, offset_3);
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+    } else {
+      const index_type temp0  = m_policy.m_tile_end[0];
+      const index_type temp1  = m_policy.m_tile_end[1];
+      const index_type numbl1 = (temp1 <= max_blocks ? temp1 : max_blocks);
+      const index_type numbl0 =
+          (temp0 * temp1 > max_blocks
+               ? index_type(max_blocks / numbl1)
+               : (temp0 <= max_blocks ? temp0 : max_blocks));
+
+      const index_type tile_id0 = static_cast<index_type>(blockIdx.x) / numbl1;
+      const index_type tile_id1 = static_cast<index_type>(blockIdx.x) % numbl1;
+      const index_type thr_id0 =
+          static_cast<index_type>(threadIdx.x) / m_policy.m_tile[1];
+      const index_type thr_id1 =
+          static_cast<index_type>(threadIdx.x) % m_policy.m_tile[1];
+
+      for (index_type i = tile_id0; i < m_policy.m_tile_end[0]; i += numbl0) {
+        const index_type offset_0 =
+            i * m_policy.m_tile[0] + thr_id0 +
+            static_cast<index_type>(m_policy.m_lower[0]);
+        if (offset_0 < m_policy.m_upper[0] && thr_id0 < m_policy.m_tile[0]) {
+          for (index_type j = tile_id1; j < m_policy.m_tile_end[1];
+               j += numbl1) {
+            const index_type offset_1 =
+                j * m_policy.m_tile[1] + thr_id1 +
+                static_cast<index_type>(m_policy.m_lower[1]);
+            if (offset_1 < m_policy.m_upper[1] &&
+                thr_id1 < m_policy.m_tile[1]) {
+              for (index_type tile_id2 = static_cast<index_type>(blockIdx.y);
+                   tile_id2 < m_policy.m_tile_end[2]; tile_id2 += gridDim.y) {
+                const index_type offset_2 =
+                    tile_id2 * m_policy.m_tile[2] +
+                    static_cast<index_type>(threadIdx.y) +
+                    static_cast<index_type>(m_policy.m_lower[2]);
+                if (offset_2 < m_policy.m_upper[2] &&
+                    static_cast<index_type>(threadIdx.y) < m_policy.m_tile[2]) {
+                  for (index_type tile_id3 =
+                           static_cast<index_type>(blockIdx.z);
+                       tile_id3 < m_policy.m_tile_end[3];
+                       tile_id3 += gridDim.z) {
+                    const index_type offset_3 =
+                        tile_id3 * m_policy.m_tile[3] +
+                        static_cast<index_type>(threadIdx.z) +
+                        static_cast<index_type>(m_policy.m_lower[3]);
+                    if (offset_3 < m_policy.m_upper[3] &&
+                        static_cast<index_type>(threadIdx.z) <
+                            m_policy.m_tile[3]) {
+                      Impl::_tag_invoke<Tag>(m_func, offset_0, offset_1,
+                                             offset_2, offset_3);
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }  // end exec_range
+
+ private:
+  const PolicyType& m_policy;
+  const Functor& m_func;
+#ifdef KOKKOS_ENABLE_SYCL
+  const EmulateCUDADim3<index_type> gridDim;
+  const EmulateCUDADim3<index_type> blockIdx;
+  const EmulateCUDADim3<index_type> threadIdx;
+#endif
+};
+
+// Rank 5
+template <typename PolicyType, typename Functor, typename Tag>
+struct DeviceIterateTile<5, PolicyType, Functor, Tag> {
+  using index_type = typename PolicyType::index_type;
+
+#ifdef KOKKOS_ENABLE_SYCL
+  KOKKOS_IMPL_DEVICE_FUNCTION DeviceIterateTile(
+      const PolicyType& policy_, const Functor& f_,
+      const EmulateCUDADim3<index_type> gridDim_,
+      const EmulateCUDADim3<index_type> blockIdx_,
+      const EmulateCUDADim3<index_type> threadIdx_)
+      : m_policy(policy_),
+        m_func(f_),
+        gridDim(gridDim_),
+        blockIdx(blockIdx_),
+        threadIdx(threadIdx_) {}
+#else
+  KOKKOS_IMPL_DEVICE_FUNCTION DeviceIterateTile(const PolicyType& policy_,
+                                                const Functor& f_)
+      : m_policy(policy_), m_func(f_) {}
+#endif
+
+  static constexpr index_type max_blocks = 65535;
+
+  KOKKOS_IMPL_DEVICE_FUNCTION
+  void exec_range() const {
+    // LL
+    if (PolicyType::inner_direction == Iterate::Left) {
+      index_type temp0        = m_policy.m_tile_end[0];
+      index_type temp1        = m_policy.m_tile_end[1];
+      const index_type numbl0 = (temp0 <= max_blocks ? temp0 : max_blocks);
+      const index_type numbl1 =
+          (temp0 * temp1 > max_blocks
+               ? index_type(max_blocks / numbl0)
+               : (temp1 <= max_blocks ? temp1 : max_blocks));
+
+      const index_type tile_id0 = static_cast<index_type>(blockIdx.x) % numbl0;
+      const index_type tile_id1 = static_cast<index_type>(blockIdx.x) / numbl0;
+      const index_type thr_id0 =
+          static_cast<index_type>(threadIdx.x) % m_policy.m_tile[0];
+      const index_type thr_id1 =
+          static_cast<index_type>(threadIdx.x) / m_policy.m_tile[0];
+
+      temp0                   = m_policy.m_tile_end[2];
+      temp1                   = m_policy.m_tile_end[3];
+      const index_type numbl2 = (temp0 <= max_blocks ? temp0 : max_blocks);
+      const index_type numbl3 =
+          (temp0 * temp1 > max_blocks
+               ? index_type(max_blocks / numbl2)
+               : (temp1 <= max_blocks ? temp1 : max_blocks));
+
+      const index_type tile_id2 = static_cast<index_type>(blockIdx.y) % numbl2;
+      const index_type tile_id3 = static_cast<index_type>(blockIdx.y) / numbl2;
+      const index_type thr_id2 =
+          static_cast<index_type>(threadIdx.y) % m_policy.m_tile[2];
+      const index_type thr_id3 =
+          static_cast<index_type>(threadIdx.y) / m_policy.m_tile[2];
+
+      for (index_type tile_id4 = static_cast<index_type>(blockIdx.z);
+           tile_id4 < m_policy.m_tile_end[4]; tile_id4 += gridDim.z) {
+        const index_type offset_4 =
+            tile_id4 * m_policy.m_tile[4] +
+            static_cast<index_type>(threadIdx.z) +
+            static_cast<index_type>(m_policy.m_lower[4]);
+        if (offset_4 < m_policy.m_upper[4] &&
+            static_cast<index_type>(threadIdx.z) < m_policy.m_tile[4]) {
+          for (index_type l = tile_id3; l < m_policy.m_tile_end[3];
+               l += numbl3) {
+            const index_type offset_3 =
+                l * m_policy.m_tile[3] + thr_id3 +
+                static_cast<index_type>(m_policy.m_lower[3]);
+            if (offset_3 < m_policy.m_upper[3] &&
+                thr_id3 < m_policy.m_tile[3]) {
+              for (index_type k = tile_id2; k < m_policy.m_tile_end[2];
+                   k += numbl2) {
+                const index_type offset_2 =
+                    k * m_policy.m_tile[2] + thr_id2 +
+                    static_cast<index_type>(m_policy.m_lower[2]);
+                if (offset_2 < m_policy.m_upper[2] &&
+                    thr_id2 < m_policy.m_tile[2]) {
+                  for (index_type j = tile_id1; j < m_policy.m_tile_end[1];
+                       j += numbl1) {
+                    const index_type offset_1 =
+                        j * m_policy.m_tile[1] + thr_id1 +
+                        static_cast<index_type>(m_policy.m_lower[1]);
+                    if (offset_1 < m_policy.m_upper[1] &&
+                        thr_id1 < m_policy.m_tile[1]) {
+                      for (index_type i = tile_id0; i < m_policy.m_tile_end[0];
+                           i += numbl0) {
+                        const index_type offset_0 =
+                            i * m_policy.m_tile[0] + thr_id0 +
+                            static_cast<index_type>(m_policy.m_lower[0]);
+                        if (offset_0 < m_policy.m_upper[0] &&
+                            thr_id0 < m_policy.m_tile[0]) {
+                          Impl::_tag_invoke<Tag>(m_func, offset_0, offset_1,
+                                                 offset_2, offset_3, offset_4);
+                        }
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+    // LR
+    else {
+      index_type temp0        = m_policy.m_tile_end[0];
+      index_type temp1        = m_policy.m_tile_end[1];
+      const index_type numbl1 = (temp1 <= max_blocks ? temp1 : max_blocks);
+      const index_type numbl0 =
+          (temp0 * temp1 > max_blocks
+               ? static_cast<index_type>(max_blocks / numbl1)
+               : (temp0 <= max_blocks ? temp0 : max_blocks));
+
+      const index_type tile_id0 = static_cast<index_type>(blockIdx.x) / numbl1;
+      const index_type tile_id1 = static_cast<index_type>(blockIdx.x) % numbl1;
+      const index_type thr_id0 =
+          static_cast<index_type>(threadIdx.x) / m_policy.m_tile[1];
+      const index_type thr_id1 =
+          static_cast<index_type>(threadIdx.x) % m_policy.m_tile[1];
+
+      temp0                   = m_policy.m_tile_end[2];
+      temp1                   = m_policy.m_tile_end[3];
+      const index_type numbl3 = (temp1 <= max_blocks ? temp1 : max_blocks);
+      const index_type numbl2 =
+          (temp0 * temp1 > max_blocks
+               ? index_type(max_blocks / numbl3)
+               : (temp0 <= max_blocks ? temp0 : max_blocks));
+
+      const index_type tile_id2 = static_cast<index_type>(blockIdx.y) / numbl3;
+      const index_type tile_id3 = static_cast<index_type>(blockIdx.y) % numbl3;
+      const index_type thr_id2 =
+          static_cast<index_type>(threadIdx.y) / m_policy.m_tile[3];
+      const index_type thr_id3 =
+          static_cast<index_type>(threadIdx.y) % m_policy.m_tile[3];
+
+      for (index_type i = tile_id0; i < m_policy.m_tile_end[0]; i += numbl0) {
+        const index_type offset_0 =
+            i * m_policy.m_tile[0] + thr_id0 +
+            static_cast<index_type>(m_policy.m_lower[0]);
+        if (offset_0 < m_policy.m_upper[0] && thr_id0 < m_policy.m_tile[0]) {
+          for (index_type j = tile_id1; j < m_policy.m_tile_end[1];
+               j += numbl1) {
+            const index_type offset_1 =
+                j * m_policy.m_tile[1] + thr_id1 +
+                static_cast<index_type>(m_policy.m_lower[1]);
+            if (offset_1 < m_policy.m_upper[1] &&
+                thr_id1 < m_policy.m_tile[1]) {
+              for (index_type k = tile_id2; k < m_policy.m_tile_end[2];
+                   k += numbl2) {
+                const index_type offset_2 =
+                    k * m_policy.m_tile[2] + thr_id2 +
+                    static_cast<index_type>(m_policy.m_lower[2]);
+                if (offset_2 < m_policy.m_upper[2] &&
+                    thr_id2 < m_policy.m_tile[2]) {
+                  for (index_type l = tile_id3; l < m_policy.m_tile_end[3];
+                       l += numbl3) {
+                    const index_type offset_3 =
+                        l * m_policy.m_tile[3] + thr_id3 +
+                        static_cast<index_type>(m_policy.m_lower[3]);
+                    if (offset_3 < m_policy.m_upper[3] &&
+                        thr_id3 < m_policy.m_tile[3]) {
+                      for (index_type tile_id4 =
+                               static_cast<index_type>(blockIdx.z);
+                           tile_id4 < m_policy.m_tile_end[4];
+                           tile_id4 += gridDim.z) {
+                        const index_type offset_4 =
+                            tile_id4 * m_policy.m_tile[4] +
+                            static_cast<index_type>(threadIdx.z) +
+                            static_cast<index_type>(m_policy.m_lower[4]);
+                        if (offset_4 < m_policy.m_upper[4] &&
+                            static_cast<index_type>(threadIdx.z) <
+                                m_policy.m_tile[4]) {
+                          Impl::_tag_invoke<Tag>(m_func, offset_0, offset_1,
+                                                 offset_2, offset_3, offset_4);
+                        }
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }  // end exec_range
+
+ private:
+  const PolicyType& m_policy;
+  const Functor& m_func;
+#ifdef KOKKOS_ENABLE_SYCL
+  const EmulateCUDADim3<index_type> gridDim;
+  const EmulateCUDADim3<index_type> blockIdx;
+  const EmulateCUDADim3<index_type> threadIdx;
+#endif
+};
+
+// Rank 6
+template <typename PolicyType, typename Functor, typename Tag>
+struct DeviceIterateTile<6, PolicyType, Functor, Tag> {
+  using index_type = typename PolicyType::index_type;
+
+#ifdef KOKKOS_ENABLE_SYCL
+  KOKKOS_IMPL_DEVICE_FUNCTION DeviceIterateTile(
+      const PolicyType& policy_, const Functor& f_,
+      const EmulateCUDADim3<index_type> gridDim_,
+      const EmulateCUDADim3<index_type> blockIdx_,
+      const EmulateCUDADim3<index_type> threadIdx_)
+      : m_policy(policy_),
+        m_func(f_),
+        gridDim(gridDim_),
+        blockIdx(blockIdx_),
+        threadIdx(threadIdx_) {}
+#else
+  KOKKOS_IMPL_DEVICE_FUNCTION DeviceIterateTile(const PolicyType& policy_,
+                                                const Functor& f_)
+      : m_policy(policy_), m_func(f_) {}
+#endif
+
+  static constexpr index_type max_blocks = 65535;
+
+  KOKKOS_IMPL_DEVICE_FUNCTION
+  void exec_range() const {
+    // LL
+    if (PolicyType::inner_direction == Iterate::Left) {
+      index_type temp0        = m_policy.m_tile_end[0];
+      index_type temp1        = m_policy.m_tile_end[1];
+      const index_type numbl0 = (temp0 <= max_blocks ? temp0 : max_blocks);
+      const index_type numbl1 =
+          (temp0 * temp1 > max_blocks
+               ? static_cast<index_type>(max_blocks / numbl0)
+               : (temp1 <= max_blocks ? temp1 : max_blocks));
+
+      const index_type tile_id0 = static_cast<index_type>(blockIdx.x) % numbl0;
+      const index_type tile_id1 = static_cast<index_type>(blockIdx.x) / numbl0;
+      const index_type thr_id0 =
+          static_cast<index_type>(threadIdx.x) % m_policy.m_tile[0];
+      const index_type thr_id1 =
+          static_cast<index_type>(threadIdx.x) / m_policy.m_tile[0];
+
+      temp0                   = m_policy.m_tile_end[2];
+      temp1                   = m_policy.m_tile_end[3];
+      const index_type numbl2 = (temp0 <= max_blocks ? temp0 : max_blocks);
+      const index_type numbl3 =
+          (temp0 * temp1 > max_blocks
+               ? static_cast<index_type>(max_blocks / numbl2)
+               : (temp1 <= max_blocks ? temp1 : max_blocks));
+
+      const index_type tile_id2 = static_cast<index_type>(blockIdx.y) % numbl2;
+      const index_type tile_id3 = static_cast<index_type>(blockIdx.y) / numbl2;
+      const index_type thr_id2 =
+          static_cast<index_type>(threadIdx.y) % m_policy.m_tile[2];
+      const index_type thr_id3 =
+          static_cast<index_type>(threadIdx.y) / m_policy.m_tile[2];
+
+      temp0                   = m_policy.m_tile_end[4];
+      temp1                   = m_policy.m_tile_end[5];
+      const index_type numbl4 = (temp0 <= max_blocks ? temp0 : max_blocks);
+      const index_type numbl5 =
+          (temp0 * temp1 > max_blocks
+               ? static_cast<index_type>(max_blocks / numbl4)
+               : (temp1 <= max_blocks ? temp1 : max_blocks));
+
+      const index_type tile_id4 = static_cast<index_type>(blockIdx.z) % numbl4;
+      const index_type tile_id5 = static_cast<index_type>(blockIdx.z) / numbl4;
+      const index_type thr_id4 =
+          static_cast<index_type>(threadIdx.z) % m_policy.m_tile[4];
+      const index_type thr_id5 =
+          static_cast<index_type>(threadIdx.z) / m_policy.m_tile[4];
+
+      for (index_type n = tile_id5; n < m_policy.m_tile_end[5]; n += numbl5) {
+        const index_type offset_5 =
+            n * m_policy.m_tile[5] + thr_id5 +
+            static_cast<index_type>(m_policy.m_lower[5]);
+        if (offset_5 < m_policy.m_upper[5] && thr_id5 < m_policy.m_tile[5]) {
+          for (index_type m = tile_id4; m < m_policy.m_tile_end[4];
+               m += numbl4) {
+            const index_type offset_4 =
+                m * m_policy.m_tile[4] + thr_id4 +
+                static_cast<index_type>(m_policy.m_lower[4]);
+            if (offset_4 < m_policy.m_upper[4] &&
+                thr_id4 < m_policy.m_tile[4]) {
+              for (index_type l = tile_id3; l < m_policy.m_tile_end[3];
+                   l += numbl3) {
+                const index_type offset_3 =
+                    l * m_policy.m_tile[3] + thr_id3 +
+                    static_cast<index_type>(m_policy.m_lower[3]);
+                if (offset_3 < m_policy.m_upper[3] &&
+                    thr_id3 < m_policy.m_tile[3]) {
+                  for (index_type k = tile_id2; k < m_policy.m_tile_end[2];
+                       k += numbl2) {
+                    const index_type offset_2 =
+                        k * m_policy.m_tile[2] + thr_id2 +
+                        static_cast<index_type>(m_policy.m_lower[2]);
+                    if (offset_2 < m_policy.m_upper[2] &&
+                        thr_id2 < m_policy.m_tile[2]) {
+                      for (index_type j = tile_id1; j < m_policy.m_tile_end[1];
+                           j += numbl1) {
+                        const index_type offset_1 =
+                            j * m_policy.m_tile[1] + thr_id1 +
+                            static_cast<index_type>(m_policy.m_lower[1]);
+                        if (offset_1 < m_policy.m_upper[1] &&
+                            thr_id1 < m_policy.m_tile[1]) {
+                          for (index_type i = tile_id0;
+                               i < m_policy.m_tile_end[0]; i += numbl0) {
+                            const index_type offset_0 =
+                                i * m_policy.m_tile[0] + thr_id0 +
+                                static_cast<index_type>(m_policy.m_lower[0]);
+                            if (offset_0 < m_policy.m_upper[0] &&
+                                thr_id0 < m_policy.m_tile[0]) {
+                              Impl::_tag_invoke<Tag>(m_func, offset_0, offset_1,
+                                                     offset_2, offset_3,
+                                                     offset_4, offset_5);
+                            }
+                          }
+                        }
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+    // LR
+    else {
+      index_type temp0        = m_policy.m_tile_end[0];
+      index_type temp1        = m_policy.m_tile_end[1];
+      const index_type numbl1 = (temp1 <= max_blocks ? temp1 : max_blocks);
+      const index_type numbl0 =
+          (temp0 * temp1 > max_blocks
+               ? static_cast<index_type>(max_blocks / numbl1)
+               : (temp0 <= max_blocks ? temp0 : max_blocks));
+
+      const index_type tile_id0 = static_cast<index_type>(blockIdx.x) / numbl1;
+      const index_type tile_id1 = static_cast<index_type>(blockIdx.x) % numbl1;
+      const index_type thr_id0 =
+          static_cast<index_type>(threadIdx.x) / m_policy.m_tile[1];
+      const index_type thr_id1 =
+          static_cast<index_type>(threadIdx.x) % m_policy.m_tile[1];
+
+      temp0                   = m_policy.m_tile_end[2];
+      temp1                   = m_policy.m_tile_end[3];
+      const index_type numbl3 = (temp1 <= max_blocks ? temp1 : max_blocks);
+      const index_type numbl2 =
+          (temp0 * temp1 > max_blocks
+               ? static_cast<index_type>(max_blocks / numbl3)
+               : (temp0 <= max_blocks ? temp0 : max_blocks));
+
+      const index_type tile_id2 = static_cast<index_type>(blockIdx.y) / numbl3;
+      const index_type tile_id3 = static_cast<index_type>(blockIdx.y) % numbl3;
+      const index_type thr_id2 =
+          static_cast<index_type>(threadIdx.y) / m_policy.m_tile[3];
+      const index_type thr_id3 =
+          static_cast<index_type>(threadIdx.y) % m_policy.m_tile[3];
+
+      temp0                   = m_policy.m_tile_end[4];
+      temp1                   = m_policy.m_tile_end[5];
+      const index_type numbl5 = (temp1 <= max_blocks ? temp1 : max_blocks);
+      const index_type numbl4 =
+          (temp0 * temp1 > max_blocks
+               ? static_cast<index_type>(max_blocks / numbl5)
+               : (temp0 <= max_blocks ? temp0 : max_blocks));
+
+      const index_type tile_id4 = static_cast<index_type>(blockIdx.z) / numbl5;
+      const index_type tile_id5 = static_cast<index_type>(blockIdx.z) % numbl5;
+      const index_type thr_id4 =
+          static_cast<index_type>(threadIdx.z) / m_policy.m_tile[5];
+      const index_type thr_id5 =
+          static_cast<index_type>(threadIdx.z) % m_policy.m_tile[5];
+
+      for (index_type i = tile_id0; i < m_policy.m_tile_end[0]; i += numbl0) {
+        const index_type offset_0 =
+            i * m_policy.m_tile[0] + thr_id0 +
+            static_cast<index_type>(m_policy.m_lower[0]);
+        if (offset_0 < m_policy.m_upper[0] && thr_id0 < m_policy.m_tile[0]) {
+          for (index_type j = tile_id1; j < m_policy.m_tile_end[1];
+               j += numbl1) {
+            const index_type offset_1 =
+                j * m_policy.m_tile[1] + thr_id1 +
+                static_cast<index_type>(m_policy.m_lower[1]);
+            if (offset_1 < m_policy.m_upper[1] &&
+                thr_id1 < m_policy.m_tile[1]) {
+              for (index_type k = tile_id2; k < m_policy.m_tile_end[2];
+                   k += numbl2) {
+                const index_type offset_2 =
+                    k * m_policy.m_tile[2] + thr_id2 +
+                    static_cast<index_type>(m_policy.m_lower[2]);
+                if (offset_2 < m_policy.m_upper[2] &&
+                    thr_id2 < m_policy.m_tile[2]) {
+                  for (index_type l = tile_id3; l < m_policy.m_tile_end[3];
+                       l += numbl3) {
+                    const index_type offset_3 =
+                        l * m_policy.m_tile[3] + thr_id3 +
+                        static_cast<index_type>(m_policy.m_lower[3]);
+                    if (offset_3 < m_policy.m_upper[3] &&
+                        thr_id3 < m_policy.m_tile[3]) {
+                      for (index_type m = tile_id4; m < m_policy.m_tile_end[4];
+                           m += numbl4) {
+                        const index_type offset_4 =
+                            m * m_policy.m_tile[4] + thr_id4 +
+                            static_cast<index_type>(m_policy.m_lower[4]);
+                        if (offset_4 < m_policy.m_upper[4] &&
+                            thr_id4 < m_policy.m_tile[4]) {
+                          for (index_type n = tile_id5;
+                               n < m_policy.m_tile_end[5]; n += numbl5) {
+                            const index_type offset_5 =
+                                n * m_policy.m_tile[5] + thr_id5 +
+                                static_cast<index_type>(m_policy.m_lower[5]);
+                            if (offset_5 < m_policy.m_upper[5] &&
+                                thr_id5 < m_policy.m_tile[5]) {
+                              Impl::_tag_invoke<Tag>(m_func, offset_0, offset_1,
+                                                     offset_2, offset_3,
+                                                     offset_4, offset_5);
+                            }
+                          }
+                        }
+                      }
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }  // end exec_range
+
+ private:
+  const PolicyType& m_policy;
+  const Functor& m_func;
+#ifdef KOKKOS_ENABLE_SYCL
+  const EmulateCUDADim3<index_type> gridDim;
+  const EmulateCUDADim3<index_type> blockIdx;
+  const EmulateCUDADim3<index_type> threadIdx;
+#endif
+};
+
+// ----------------------------------------------------------------------------------
+
+namespace Reduce {
+
+template <typename T>
+struct is_array_type : std::false_type {
+  using value_type = T;
+};
+
+template <typename T>
+struct is_array_type<T*> : std::true_type {
+  using value_type = T;
+};
+
+template <typename T>
+struct is_array_type<T[]> : std::true_type {
+  using value_type = T;
+};
+
+// ------------------------------------------------------------------ //
+
+template <typename T>
+using value_type_storage_t =
+    typename std::conditional_t<is_array_type<T>::value, std::decay<T>,
+                                std::add_lvalue_reference<T> >::type;
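+
+// For example, value_type_storage_t<double> is double& (scalar reductions
+// bind the result by reference), while value_type_storage_t<double*> decays
+// to double* (array reductions carry a pointer to the storage).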
+
+// ParallelReduce iteration pattern
+// Scalar reductions
+
+// num_blocks = min( num_tiles, max_num_blocks ); i.e. determined by the
+// number of tiles and by reduction algorithm constraints.
+// Extract the n-dim tile offsets (i.e. the tile's global starting
+// multi-index) from tileid = blockid using the tile dimensions; extract the
+// local indices within a tile from (index_type)threadIdx_x using the tile
+// dims, constrained by the blocksize; combine the tile and local id info
+// into multi-dim global ids.
+
+// Pattern:
+// Each block+thread is responsible for a tile+local_id combo (plus
+// additional ones when striding by num_blocks)
+// 1. create offset arrays
+// 2. loop over the number of tiles, striding by griddim (equal to the number
+//    of tiles, or to the max number of blocks)
+// 3. set temporaries for tile_idx and thrd_idx, which will be modified
+// 4. depending on LL vs LR:
+//      determine the tile starting point offsets (multidim)
+//      determine the local index offsets (multidim)
+//      concatenate tile offset + local offset into the global multi-dim index
+//    if the offset is within the range bounds AND the local offset is within
+//    the tile bounds, call the functor
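+
+// Worked example (editorial sketch, not upstream documentation): for a
+// hypothetical rank-3 policy with m_tile_end = {4, 3, 2} (24 tiles),
+// m_tile = {8, 8, 4}, and m_lower = {0, 0, 0}, the LL decomposition of the
+// flat tile_idx 13 proceeds as
+//   i = 0: offset[0] = (13 % 4) * 8 = 8;  tile_idx = 13 / 4 = 3
+//   i = 1: offset[1] = ( 3 % 3) * 8 = 0;  tile_idx =  3 / 3 = 1
+//   i = 2: offset[2] = ( 1 % 2) * 4 = 4;  tile_idx =  1 / 2 = 0
+// so tile 13 starts at the global multi-index (8, 0, 4); the
+// threadIdx-derived local offsets are then added on top, exactly as in
+// exec_range() below.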
+
+template <int N, typename PolicyType, typename Functor, typename Tag,
+          typename ValueType, typename Enable = void>
+struct DeviceIterateTile {
+  using index_type         = typename PolicyType::index_type;
+  using value_type_storage = value_type_storage_t<ValueType>;
+
+#ifdef KOKKOS_ENABLE_SYCL
+  KOKKOS_IMPL_DEVICE_FUNCTION DeviceIterateTile(
+      const PolicyType& policy_, const Functor& f_, value_type_storage v_,
+      const EmulateCUDADim3<index_type> gridDim_,
+      const EmulateCUDADim3<index_type> blockIdx_,
+      const EmulateCUDADim3<index_type> threadIdx_)
+      : m_policy(policy_),
+        m_func(f_),
+        m_v(v_),
+        gridDim(gridDim_),
+        blockIdx(blockIdx_),
+        threadIdx(threadIdx_) {}
+#else
+  KOKKOS_IMPL_DEVICE_FUNCTION DeviceIterateTile(const PolicyType& policy_,
+                                                const Functor& f_,
+                                                value_type_storage v_)
+      : m_policy(policy_), m_func(f_), m_v(v_) {}
+#endif
+
+  KOKKOS_IMPL_DEVICE_FUNCTION
+  void exec_range() const {
+    if (static_cast<index_type>(blockIdx.x) < m_policy.m_num_tiles &&
+        static_cast<index_type>(threadIdx.y) < m_policy.m_prod_tile_dims) {
+      index_type m_offset[PolicyType::rank];  // tile starting global id offset
+      index_type
+          m_local_offset[PolicyType::rank];  // local offsets within the tile
+
+      for (index_type tileidx = static_cast<index_type>(blockIdx.x);
+           tileidx < m_policy.m_num_tiles; tileidx += gridDim.x) {
+        index_type tile_idx =
+            tileidx;  // temp because tile_idx will be modified while
+                      // determining tile starting point offsets
+        index_type thrd_idx = static_cast<index_type>(threadIdx.y);
+        bool in_bounds      = true;
+
+        // LL
+        if (PolicyType::inner_direction == Iterate::Left) {
+          for (int i = 0; i < PolicyType::rank; ++i) {
+            m_offset[i] =
+                (tile_idx % m_policy.m_tile_end[i]) * m_policy.m_tile[i] +
+                m_policy.m_lower[i];
+            tile_idx /= m_policy.m_tile_end[i];
+
+            // tile-local indices identified with (index_type)threadIdx_y
+            m_local_offset[i] = (thrd_idx % m_policy.m_tile[i]);
+            thrd_idx /= m_policy.m_tile[i];
+
+            m_offset[i] += m_local_offset[i];
+            if (!(m_offset[i] < m_policy.m_upper[i] &&
+                  m_local_offset[i] < m_policy.m_tile[i])) {
+              in_bounds = false;
+            }
+          }
+          if (in_bounds) {
+            Impl::_tag_invoke_array<Tag>(m_func, m_offset, m_v);
+          }
+        }
+        // LR
+        else {
+          for (int i = PolicyType::rank - 1; i >= 0; --i) {
+            m_offset[i] =
+                (tile_idx % m_policy.m_tile_end[i]) * m_policy.m_tile[i] +
+                m_policy.m_lower[i];
+            tile_idx /= m_policy.m_tile_end[i];
+
+            // tile-local indices identified with (index_type)threadIdx_y
+            m_local_offset[i] =
+                (thrd_idx %
+                 m_policy.m_tile[i]);  // Move this to first computation,
+                                       // add to m_offset right away
+            thrd_idx /= m_policy.m_tile[i];
+
+            m_offset[i] += m_local_offset[i];
+            if (!(m_offset[i] < m_policy.m_upper[i] &&
+                  m_local_offset[i] < m_policy.m_tile[i])) {
+              in_bounds = false;
+            }
+          }
+          if (in_bounds) {
+            Impl::_tag_invoke_array<Tag>(m_func, m_offset, m_v);
+          }
+        }
+      }
+    }
+  }  // end exec_range
+
+ private:
+  const PolicyType& m_policy;
+  const Functor& m_func;
+  value_type_storage m_v;
+#ifdef KOKKOS_ENABLE_SYCL
+  const EmulateCUDADim3<index_type> gridDim;
+  const EmulateCUDADim3<index_type> blockIdx;
+  const EmulateCUDADim3<index_type> threadIdx;
+#endif
+};
+
+}  // namespace Reduce
+}  // namespace Impl
+}  // namespace Kokkos
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/KokkosExp_ViewMapping.hpp b/bundled/kokkos-3.7.00/core/src/impl/KokkosExp_ViewMapping.hpp
new file mode 100644 (file)
index 0000000..a41ffb1
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// Deprecated file for backward compatibility
+
+#include <impl/Kokkos_ViewMapping.hpp>
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_AnalyzePolicy.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_AnalyzePolicy.hpp
new file mode 100644 (file)
index 0000000..2ffcd62
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_ANALYZE_POLICY_HPP
+#define KOKKOS_IMPL_ANALYZE_POLICY_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Concepts.hpp>  // IndexType
+#include <traits/Kokkos_Traits_fwd.hpp>
+#include <traits/Kokkos_PolicyTraitAdaptor.hpp>
+
+#include <traits/Kokkos_ExecutionSpaceTrait.hpp>
+#include <traits/Kokkos_GraphKernelTrait.hpp>
+#include <traits/Kokkos_IndexTypeTrait.hpp>
+#include <traits/Kokkos_IterationPatternTrait.hpp>
+#include <traits/Kokkos_LaunchBoundsTrait.hpp>
+#include <traits/Kokkos_OccupancyControlTrait.hpp>
+#include <traits/Kokkos_ScheduleTrait.hpp>
+#include <traits/Kokkos_WorkItemPropertyTrait.hpp>
+#include <traits/Kokkos_WorkTagTrait.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="AnalyzePolicyBaseTraits"> {{{1
+
+// Mix in the defaults (base_traits) for the traits that aren't yet handled
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="MSVC EBO failure workaround"> {{{2
+
+template <class TraitSpecList>
+struct KOKKOS_IMPL_ENFORCE_EMPTY_BASE_OPTIMIZATION AnalyzeExecPolicyBaseTraits;
+template <class... TraitSpecifications>
+struct KOKKOS_IMPL_ENFORCE_EMPTY_BASE_OPTIMIZATION
+    AnalyzeExecPolicyBaseTraits<type_list<TraitSpecifications...>>
+    : TraitSpecifications::base_traits... {};
+
+// </editor-fold> end AnalyzePolicyBaseTraits }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="AnalyzeExecPolicy specializations"> {{{1
+
+//------------------------------------------------------------------------------
+// Note: unspecialized, so that the default pathway is to fall back to using
+// the PolicyTraitMatcher. See AnalyzeExecPolicyUseMatcher below
+template <class Enable, class... Traits>
+struct AnalyzeExecPolicy
+    : AnalyzeExecPolicyUseMatcher<void, execution_policy_trait_specifications,
+                                  Traits...> {
+  using base_t =
+      AnalyzeExecPolicyUseMatcher<void, execution_policy_trait_specifications,
+                                  Traits...>;
+  using base_t::base_t;
+};
+
+//------------------------------------------------------------------------------
+// Ignore void for backwards compatibility purposes, though hopefully no one is
+// using this in application code
+template <class... Traits>
+struct AnalyzeExecPolicy<void, void, Traits...>
+    : AnalyzeExecPolicy<void, Traits...> {
+  using base_t = AnalyzeExecPolicy<void, Traits...>;
+  using base_t::base_t;
+};
+
+//------------------------------------------------------------------------------
+template <>
+struct AnalyzeExecPolicy<void>
+    : AnalyzeExecPolicyBaseTraits<execution_policy_trait_specifications> {
+  // Ensure default constructibility since a converting constructor causes it to
+  // be deleted.
+  AnalyzeExecPolicy() = default;
+
+  // Base converting constructor and assignment operator: unless an individual
+  // policy analysis deletes a constructor, assume it's convertible
+  template <class Other>
+  AnalyzeExecPolicy(ExecPolicyTraitsWithDefaults<Other> const&) {}
+
+  template <class Other>
+  AnalyzeExecPolicy& operator=(ExecPolicyTraitsWithDefaults<Other> const&) {
+    return *this;
+  }
+};
+
+// </editor-fold> end AnalyzeExecPolicy specializations }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="AnalyzeExecPolicyUseMatcher"> {{{1
+
+// We can avoid requiring each policy to specialize AnalyzeExecPolicy itself
+// by piggy-backing off of the PolicyTraitMatcher that we already need for
+// things like require(). We mix in the effects of the trait using the
+// `mixin_matching_trait` nested alias template in the trait specification.
+
+// General PolicyTraitMatcher version
+
+// Matching case
+template <class TraitSpec, class... TraitSpecs, class Trait, class... Traits>
+struct AnalyzeExecPolicyUseMatcher<
+    std::enable_if_t<PolicyTraitMatcher<TraitSpec, Trait>::value>,
+    type_list<TraitSpec, TraitSpecs...>, Trait, Traits...>
+    : TraitSpec::template mixin_matching_trait<
+          Trait, AnalyzeExecPolicy<void, Traits...>> {
+  using base_t = typename TraitSpec::template mixin_matching_trait<
+      Trait, AnalyzeExecPolicy<void, Traits...>>;
+  using base_t::base_t;
+};
+
+// Non-matching case
+template <class TraitSpec, class... TraitSpecs, class Trait, class... Traits>
+struct AnalyzeExecPolicyUseMatcher<
+    std::enable_if_t<!PolicyTraitMatcher<TraitSpec, Trait>::value>,
+    type_list<TraitSpec, TraitSpecs...>, Trait, Traits...>
+    : AnalyzeExecPolicyUseMatcher<void, type_list<TraitSpecs...>, Trait,
+                                  Traits...> {
+  using base_t = AnalyzeExecPolicyUseMatcher<void, type_list<TraitSpecs...>,
+                                             Trait, Traits...>;
+  using base_t::base_t;
+};
+
+// No match found case:
+template <class>
+struct show_name_of_invalid_execution_policy_trait;
+template <class Trait, class... Traits>
+struct AnalyzeExecPolicyUseMatcher<void, type_list<>, Trait, Traits...> {
+  static constexpr auto trigger_error_message =
+      show_name_of_invalid_execution_policy_trait<Trait>{};
+  static_assert(
+      /* always false: */ std::is_void<Trait>::value,
+      "Unknown execution policy trait. Search compiler output for "
+      "'show_name_of_invalid_execution_policy_trait' to see the type of the "
+      "invalid trait.");
+};
+
+// All traits matched case:
+template <>
+struct AnalyzeExecPolicyUseMatcher<void, type_list<>>
+    : AnalyzeExecPolicy<void> {
+  using base_t = AnalyzeExecPolicy<void>;
+  using base_t::base_t;
+};
+
+// </editor-fold> end AnalyzeExecPolicyUseMatcher }}}1
+//==============================================================================
+
+//------------------------------------------------------------------------------
+// Used for defaults that depend on other analysis results
+template <class AnalysisResults>
+struct ExecPolicyTraitsWithDefaults : AnalysisResults {
+  using base_t = AnalysisResults;
+  using base_t::base_t;
+  // The old code turned this into an integral type for backwards compatibility,
+  // so that's what we're doing here. The original comment was:
+  //   nasty hack to make index_type into an integral_type
+  //   instead of the wrapped IndexType<T> for backwards compatibility
+  using index_type = typename std::conditional_t<
+      base_t::index_type_is_defaulted,
+      Kokkos::IndexType<typename base_t::execution_space::size_type>,
+      typename base_t::index_type>::type;
+};
+
+//------------------------------------------------------------------------------
+
+constexpr bool warn_if_deprecated(std::false_type) { return true; }
+KOKKOS_DEPRECATED_WITH_COMMENT(
+    "Invalid WorkTag template argument in execution policy!!")
+constexpr bool warn_if_deprecated(std::true_type) { return true; }
+#define KOKKOS_IMPL_STATIC_WARNING(...) \
+  static_assert(                        \
+      warn_if_deprecated(std::integral_constant<bool, __VA_ARGS__>()), "")
+
+template <typename... Traits>
+struct PolicyTraits
+    : ExecPolicyTraitsWithDefaults<AnalyzeExecPolicy<void, Traits...>> {
+  using base_t =
+      ExecPolicyTraitsWithDefaults<AnalyzeExecPolicy<void, Traits...>>;
+  using base_t::base_t;
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+  KOKKOS_IMPL_STATIC_WARNING(!std::is_empty<typename base_t::work_tag>::value &&
+                             !std::is_void<typename base_t::work_tag>::value);
+#endif
+};
+
+#undef KOKKOS_IMPL_STATIC_WARNING
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_IMPL_ANALYZE_POLICY_HPP
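The net effect of this header is that execution policies accept their trait arguments in any order. A minimal sketch of that behavior (not part of the bundled file; it assumes only the public RangePolicy, Schedule, and IndexType names from <Kokkos_Core.hpp>):

    #include <Kokkos_Core.hpp>
    #include <type_traits>

    // Trait order is irrelevant: PolicyTraitMatcher routes each argument
    // to its slot, and the unmatched defaults are mixed in afterwards.
    using A = Kokkos::RangePolicy<Kokkos::IndexType<int>,
                                  Kokkos::Schedule<Kokkos::Dynamic>>;
    using B = Kokkos::RangePolicy<Kokkos::Schedule<Kokkos::Dynamic>,
                                  Kokkos::IndexType<int>>;

    // ExecPolicyTraitsWithDefaults unwraps IndexType<int> to the plain
    // integral type (int here) for backwards compatibility, as noted above.
    static_assert(std::is_same<A::index_type, B::index_type>::value, "");

Passing a trait that no specification matches exhausts the type_list, lands in the "no match found" specialization, and surfaces the offending type in the compiler output via show_name_of_invalid_execution_policy_trait.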
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Assembly.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Assembly.hpp
new file mode 100644 (file)
index 0000000..a31dd1c
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_ASSEMBLY_HPP)
+#define KOKKOS_ATOMIC_ASSEMBLY_HPP
+namespace Kokkos {
+
+namespace Impl {
+
+#if !defined(_WIN32)
+struct cas128_t {
+  uint64_t lower;
+  uint64_t upper;
+
+  KOKKOS_INLINE_FUNCTION
+  cas128_t() {
+    lower = 0;
+    upper = 0;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  cas128_t(const cas128_t& a) {
+    lower = a.lower;
+    upper = a.upper;
+  }
+  KOKKOS_INLINE_FUNCTION
+  cas128_t(volatile cas128_t* a) {
+    lower = a->lower;
+    upper = a->upper;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool operator!=(const cas128_t& a) const {
+    return (lower != a.lower) || upper != a.upper;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void operator=(const cas128_t& a) {
+    lower = a.lower;
+    upper = a.upper;
+  }
+  KOKKOS_INLINE_FUNCTION
+  void operator=(const cas128_t& a) volatile {
+    lower = a.lower;
+    upper = a.upper;
+  }
+} __attribute__((__aligned__(16)));
+#endif
+
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+inline cas128_t cas128(volatile cas128_t* ptr, cas128_t cmp, cas128_t swap) {
+  bool swapped = false;
+  __asm__ __volatile__(
+      "lock cmpxchg16b %1\n\t"
+      "setz %0"
+      : "=q"(swapped), "+m"(*ptr), "+d"(cmp.upper), "+a"(cmp.lower)
+      : "c"(swap.upper), "b"(swap.lower), "q"(swapped));
+  return cmp;
+}
+#endif
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
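A sketch of the cas128 calling convention under the guards above, i.e. x86-64 with KOKKOS_ENABLE_ASM and outside _WIN32 (the snippet is illustrative and not part of the bundled file). cmpxchg16b returns the previous 16-byte contents, so success is detected by comparing the return value against the expected value:

    using Kokkos::Impl::cas128_t;

    volatile cas128_t slot;  // default-constructed to {0, 0}; the struct is
                             // 16-byte aligned, as cmpxchg16b requires
    cas128_t expected;       // {0, 0}
    cas128_t desired;
    desired.lower = 1;
    desired.upper = 2;

    cas128_t prev = Kokkos::Impl::cas128(&slot, expected, desired);
    // cas128_t only defines operator!=, hence the negation:
    bool const succeeded = !(prev != expected);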
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Compare_Exchange_Strong.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Compare_Exchange_Strong.hpp
new file mode 100644 (file)
index 0000000..e203c0a
--- /dev/null
@@ -0,0 +1,437 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+#include <xmmintrin.h>
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP) && \
+    !defined(KOKKOS_ATOMIC_COMPARE_EXCHANGE_STRONG_HPP)
+#define KOKKOS_ATOMIC_COMPARE_EXCHANGE_STRONG_HPP
+
+#include <impl/Kokkos_Atomic_Memory_Order.hpp>
+#include <impl/Kokkos_Memory_Fence.hpp>
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#include <Cuda/Kokkos_Cuda_Atomic_Intrinsics.hpp>
+#endif
+
+namespace Kokkos {
+
+//----------------------------------------------------------------------------
+// Cuda native CAS supports int, unsigned int, and unsigned long long int
+// (non-standard type). Must cast away 'volatile' for the CAS call.
+
+#if defined(KOKKOS_ENABLE_CUDA)
+
+#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+__inline__ __device__ int atomic_compare_exchange(volatile int* const dest,
+                                                  const int compare,
+                                                  const int val) {
+  return atomicCAS((int*)dest, compare, val);
+}
+
+__inline__ __device__ unsigned int atomic_compare_exchange(
+    volatile unsigned int* const dest, const unsigned int compare,
+    const unsigned int val) {
+  return atomicCAS((unsigned int*)dest, compare, val);
+}
+
+__inline__ __device__ unsigned long long int atomic_compare_exchange(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int compare, const unsigned long long int val) {
+  return atomicCAS((unsigned long long int*)dest, compare, val);
+}
+
+template <typename T>
+__inline__ __device__ T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) == sizeof(int), const T&> val) {
+  const int tmp = atomicCAS((int*)dest, *((int*)&compare), *((int*)&val));
+  return *((T*)&tmp);
+}
+
+template <typename T>
+__inline__ __device__ T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) != sizeof(int) &&
+                         sizeof(T) == sizeof(unsigned long long int),
+                     const T&>
+        val) {
+  using type     = unsigned long long int;
+  const type tmp = atomicCAS((type*)dest, *((type*)&compare), *((type*)&val));
+  return *((T*)&tmp);
+}
+
+template <typename T>
+__inline__ __device__ T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8), const T>& val) {
+  T return_val;
+  // This is a way to (hopefully) avoid deadlock in a warp
+  int done                 = 0;
+  unsigned int mask        = __activemask();
+  unsigned int active      = __ballot_sync(mask, 1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_cuda_space((void*)dest)) {
+        Kokkos::memory_fence();
+        return_val = *dest;
+        if (return_val == compare) *dest = val;
+        Kokkos::memory_fence();
+        Impl::unlock_address_cuda_space((void*)dest);
+        done = 1;
+      }
+    }
+    done_active = __ballot_sync(mask, done);
+  }
+  return return_val;
+}
+#endif
+#endif
+
+//----------------------------------------------------------------------------
+// GCC native CAS supports int, long, unsigned int, unsigned long.
+// Intel native CAS support int and long with the same interface as GCC.
+#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+#if defined(KOKKOS_ENABLE_WINDOWS_ATOMICS)
+// atomic_compare_exchange are already defined in Kokkos_Atomic_Windows.hpp
+#elif defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
+
+inline int atomic_compare_exchange(volatile int* const dest, const int compare,
+                                   const int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_val_compare_and_swap(dest, compare, val);
+}
+
+inline long atomic_compare_exchange(volatile long* const dest,
+                                    const long compare, const long val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_val_compare_and_swap(dest, compare, val);
+}
+
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS)
+
+// GCC supports unsigned
+
+inline unsigned int atomic_compare_exchange(volatile unsigned int* const dest,
+                                            const unsigned int compare,
+                                            const unsigned int val) {
+  return __sync_val_compare_and_swap(dest, compare, val);
+}
+
+inline unsigned long atomic_compare_exchange(volatile unsigned long* const dest,
+                                             const unsigned long compare,
+                                             const unsigned long val) {
+  return __sync_val_compare_and_swap(dest, compare, val);
+}
+
+inline unsigned long long atomic_compare_exchange(
+    volatile unsigned long long* const dest, const unsigned long long compare,
+    const unsigned long long val) {
+  return __sync_val_compare_and_swap(dest, compare, val);
+}
+
+#endif
+
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) == sizeof(int), const T&> val) {
+  union U {
+    int i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } tmp;
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  tmp.i =
+      __sync_val_compare_and_swap((int*)dest, *((int*)&compare), *((int*)&val));
+  return tmp.t;
+}
+
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) == sizeof(long),
+                     const T&>
+        val) {
+  union U {
+    long i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } tmp;
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  tmp.i = __sync_val_compare_and_swap((long*)dest, *((long*)&compare),
+                                      *((long*)&val));
+  return tmp.t;
+}
+
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long) &&
+                         sizeof(T) == sizeof(Impl::cas128_t),
+                     const T&>
+        val) {
+  union U {
+    Impl::cas128_t i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } tmp;
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  tmp.i = Impl::cas128((Impl::cas128_t*)dest, *((Impl::cas128_t*)&compare),
+                       *((Impl::cas128_t*)&val));
+  return tmp.t;
+}
+#endif
+
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T compare,
+    std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8)
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+                         && (sizeof(T) != 16)
+#endif
+                         ,
+                     const T>& val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  while (!Impl::lock_address_host_space((void*)dest))
+    ;
+  Kokkos::memory_fence();
+  T return_val = *dest;
+  if (return_val == compare) {
+    // Don't use the following line of code here:
+    //
+    // const T tmp = *dest = val;
+    //
+    // Instead, put each assignment in its own statement.  This is
+    // because the overload of T::operator= for volatile *this should
+    // return void, not volatile T&.  See Kokkos #177:
+    //
+    // https://github.com/kokkos/kokkos/issues/177
+    *dest       = val;
+    const T tmp = *dest;
+#ifndef KOKKOS_COMPILER_CLANG
+    (void)tmp;
+#endif
+    Kokkos::memory_fence();
+  }
+  Impl::unlock_address_host_space((void*)dest);
+  return return_val;
+}
+//----------------------------------------------------------------------------
+
+#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_compare_exchange(volatile T* const dest,
+                                                 const T compare, const T val) {
+  T retval;
+#pragma omp critical
+  {
+    retval = dest[0];
+    if (retval == compare) dest[0] = val;
+  }
+  return retval;
+}
+
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_compare_exchange(volatile T* const dest_v,
+                                                 const T compare, const T val) {
+  T* dest  = const_cast<T*>(dest_v);
+  T retval = *dest;
+  if (retval == compare) *dest = val;
+  return retval;
+}
+
+#endif
+#endif
+
+// dummy for non-CUDA Kokkos headers being processed by NVCC
+#if defined(__CUDA_ARCH__) && !defined(KOKKOS_ENABLE_CUDA)
+template <typename T>
+__inline__ __device__ T
+atomic_compare_exchange(volatile T* const, const Kokkos::Impl::identity_t<T>,
+                        const Kokkos::Impl::identity_t<T>) {
+  return T();
+}
+#endif
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION bool atomic_compare_exchange_strong(
+    volatile T* const dest, const T compare, const T val) {
+  return compare == atomic_compare_exchange(dest, compare, val);
+}
+//----------------------------------------------------------------------------
+
+namespace Impl {
+// memory-ordered versions are in the Impl namespace
+
+template <class T, class MemoryOrderFailure>
+KOKKOS_INLINE_FUNCTION bool _atomic_compare_exchange_strong_fallback(
+    T* dest, T compare, T val, memory_order_seq_cst_t, MemoryOrderFailure) {
+  Kokkos::memory_fence();
+  auto rv = Kokkos::atomic_compare_exchange_strong(dest, compare, val);
+  Kokkos::memory_fence();
+  return rv;
+}
+
+template <class T, class MemoryOrderFailure>
+KOKKOS_INLINE_FUNCTION bool _atomic_compare_exchange_strong_fallback(
+    T* dest, T compare, T val, memory_order_acquire_t, MemoryOrderFailure) {
+  auto rv = Kokkos::atomic_compare_exchange_strong(dest, compare, val);
+  Kokkos::memory_fence();
+  return rv;
+}
+
+template <class T, class MemoryOrderFailure>
+KOKKOS_INLINE_FUNCTION bool _atomic_compare_exchange_strong_fallback(
+    T* dest, T compare, T val, memory_order_release_t, MemoryOrderFailure) {
+  Kokkos::memory_fence();
+  return Kokkos::atomic_compare_exchange_strong(dest, compare, val);
+}
+
+template <class T, class MemoryOrderFailure>
+KOKKOS_INLINE_FUNCTION bool _atomic_compare_exchange_strong_fallback(
+    T* dest, T compare, T val, memory_order_relaxed_t, MemoryOrderFailure) {
+  return Kokkos::atomic_compare_exchange_strong(dest, compare, val);
+}
+
+#if (defined(KOKKOS_ENABLE_GNU_ATOMICS) && !defined(__CUDA_ARCH__)) ||   \
+    (defined(KOKKOS_ENABLE_INTEL_ATOMICS) && !defined(__CUDA_ARCH__)) || \
+    defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
+
+#if defined(__CUDA_ARCH__)
+#define KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH __inline__ __device__
+#else
+#define KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH inline
+#endif
+
+template <class T, class MemoryOrderSuccess, class MemoryOrderFailure>
+KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH bool _atomic_compare_exchange_strong(
+    T* dest, T compare, T val, MemoryOrderSuccess, MemoryOrderFailure,
+    std::enable_if_t<
+        (sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8 ||
+         sizeof(T) == 16) &&
+            std::is_same<typename MemoryOrderSuccess::memory_order,
+                         std::remove_cv_t<MemoryOrderSuccess>>::value &&
+            std::is_same<typename MemoryOrderFailure::memory_order,
+                         std::remove_cv_t<MemoryOrderFailure>>::value,
+        void const**> = nullptr) {
+  return __atomic_compare_exchange_n(dest, &compare, val, /* weak = */ false,
+                                     MemoryOrderSuccess::gnu_constant,
+                                     MemoryOrderFailure::gnu_constant);
+}
+
+template <class T, class MemoryOrderSuccess, class MemoryOrderFailure>
+KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH bool _atomic_compare_exchange_strong(
+    T* dest, T compare, T val, MemoryOrderSuccess order_success,
+    MemoryOrderFailure order_failure,
+    std::enable_if_t<
+        !(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
+          sizeof(T) == 8 || sizeof(T) == 16) &&
+            std::is_same<typename MemoryOrderSuccess::memory_order,
+                         std::remove_cv_t<MemoryOrderSuccess>>::value &&
+            std::is_same<typename MemoryOrderFailure::memory_order,
+                         std::remove_cv_t<MemoryOrderFailure>>::value,
+        void const**> = nullptr) {
+  return _atomic_compare_exchange_strong_fallback(dest, compare, val,
+                                                  order_success, order_failure);
+}
+
+#else
+
+template <class T, class MemoryOrderSuccess, class MemoryOrderFailure>
+KOKKOS_INLINE_FUNCTION bool _atomic_compare_exchange_strong(
+    T* dest, T compare, T val, MemoryOrderSuccess order_success,
+    MemoryOrderFailure order_failure) {
+  return _atomic_compare_exchange_strong_fallback(dest, compare, val,
+                                                  order_success, order_failure);
+}
+
+#endif
+
+// TODO static asserts in overloads that don't make sense (as listed in
+// https://gcc.gnu.org/onlinedocs/gcc-5.2.0/gcc/_005f_005fatomic-Builtins.html)
+template <class T, class MemoryOrderSuccess, class MemoryOrderFailure>
+KOKKOS_FORCEINLINE_FUNCTION bool atomic_compare_exchange_strong(
+    T* dest, T compare, T val, MemoryOrderSuccess order_success,
+    MemoryOrderFailure order_failure) {
+  return _atomic_compare_exchange_strong(dest, compare, val, order_success,
+                                         order_failure);
+}
+
+}  // end namespace Impl
+
+}  // namespace Kokkos
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#include <Cuda/Kokkos_Cuda_Atomic_Intrinsics_Restore_Builtins.hpp>
+#endif
+
+#endif
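A sketch of the entry points this header provides (not part of the bundled file): the value-returning atomic_compare_exchange, the bool-returning strong form built on top of it, and the memory-ordered Impl overload, assuming the memory_order_relaxed tag instance declared in Kokkos_Atomic_Memory_Order.hpp:

    int counter = 0;

    // Value-returning form: yields the previous value; the store happened
    // iff that previous value equals the `compare` argument.
    int prev = Kokkos::atomic_compare_exchange(&counter, 0, 42);

    // Strong form: returns true on success; implemented literally as
    // `compare == atomic_compare_exchange(dest, compare, val)`.
    bool ok = Kokkos::atomic_compare_exchange_strong(&counter, 42, 7);

    // Memory-ordered Impl variant: maps to __atomic_compare_exchange_n on
    // GNU/Intel builds and to the fenced fallbacks above otherwise.
    bool ok2 = Kokkos::Impl::atomic_compare_exchange_strong(
        &counter, 7, 0, Kokkos::Impl::memory_order_relaxed,
        Kokkos::Impl::memory_order_relaxed);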
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Compare_Exchange_Weak.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Compare_Exchange_Weak.hpp
new file mode 100644 (file)
index 0000000..ad5b010
--- /dev/null
@@ -0,0 +1,408 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+#include <xmmintrin.h>
+#endif
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Atomic.hpp>
+#ifndef KOKKOS_ATOMIC_COMPARE_EXCHANGE_WEAK_HPP
+#define KOKKOS_ATOMIC_COMPARE_EXCHANGE_WEAK_HPP
+
+namespace Kokkos {
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+// Cuda sm_70 or greater supports C++-like semantics directly
+
+#if defined(KOKKOS_ENABLE_CUDA)
+
+#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+
+#if __CUDA_ARCH__ >= 700
+// See: https://github.com/ogiroux/freestanding
+#define kokkos_cuda_internal_cas_release_32(ptr, old, expected, desired) \
+  asm volatile("atom.cas.release.sys.b32 %0, [%1], %2, %3;"              \
+               : "=r"(old)                                               \
+               : "l"(ptr), "r"(expected), "r"(desired)                   \
+               : "memory")
+#define kokkos_cuda_internal_cas_acquire_32(ptr, old, expected, desired) \
+  asm volatile("atom.cas.acquire.sys.b32 %0, [%1], %2, %3;"              \
+               : "=r"(old)                                               \
+               : "l"(ptr), "r"(expected), "r"(desired)                   \
+               : "memory")
+#define kokkos_cuda_internal_cas_acq_rel_32(ptr, old, expected, desired) \
+  asm volatile("atom.cas.acq_rel.sys.b32 %0, [%1], %2, %3;"              \
+               : "=r"(old)                                               \
+               : "l"(ptr), "r"(expected), "r"(desired)                   \
+               : "memory")
+#define kokkos_cuda_internal_cas_relaxed_32(ptr, old, expected, desired) \
+  asm volatile("atom.cas.relaxed.sys.b32 %0, [%1], %2, %3;"              \
+               : "=r"(old)                                               \
+               : "l"(ptr), "r"(expected), "r"(desired)                   \
+               : "memory")
+// 64-bit variants, used by the sizeof(T) == 8 overload below
+#define kokkos_cuda_internal_cas_release_64(ptr, old, expected, desired) \
+  asm volatile("atom.cas.release.sys.b64 %0, [%1], %2, %3;"              \
+               : "=l"(old)                                               \
+               : "l"(ptr), "l"(expected), "l"(desired)                   \
+               : "memory")
+#define kokkos_cuda_internal_cas_acquire_64(ptr, old, expected, desired) \
+  asm volatile("atom.cas.acquire.sys.b64 %0, [%1], %2, %3;"              \
+               : "=l"(old)                                               \
+               : "l"(ptr), "l"(expected), "l"(desired)                   \
+               : "memory")
+#define kokkos_cuda_internal_cas_acq_rel_64(ptr, old, expected, desired) \
+  asm volatile("atom.cas.acq_rel.sys.b64 %0, [%1], %2, %3;"              \
+               : "=l"(old)                                               \
+               : "l"(ptr), "l"(expected), "l"(desired)                   \
+               : "memory")
+#define kokkos_cuda_internal_cas_relaxed_64(ptr, old, expected, desired) \
+  asm volatile("atom.cas.relaxed.sys.b64 %0, [%1], %2, %3;"              \
+               : "=l"(old)                                               \
+               : "l"(ptr), "l"(expected), "l"(desired)                   \
+               : "memory")
+#define kokkos_cuda_internal_fence_seq_cst() \
+  asm volatile("fence.sc.sys;" : : : "memory")
+#define kokkos_cuda_internal_fence_acq_rel() \
+  asm volatile("fence.acq_rel.sys;" : : : "memory")
+#else
+#define kokkos_cuda_internal_fence_acq_rel() \
+  asm volatile("membar.sys;" : : : "memory")
+#define kokkos_cuda_internal_fence_seq_cst() \
+  asm volatile("membar.sys;" : : : "memory")
+#endif
+
+// 32-bit version
+template <class T, std::enable_if_t<sizeof(T) == 4, int> = 0>
+__inline__ __device__ bool atomic_compare_exchange_weak(
+    T volatile* const dest, T* const expected, T const desired,
+    std::memory_order success_order = std::memory_order_seq_cst,
+    std::memory_order failure_order = std::memory_order_seq_cst) {
+  // TODO assert that success_order >= failure_order
+  // See: https://github.com/ogiroux/freestanding
+  int32_t tmp = 0;
+  int32_t old = 0;
+  memcpy(&tmp, &desired, sizeof(T));
+  memcpy(&old, expected, sizeof(T));
+  int32_t old_tmp = old;
+#if __CUDA_ARCH__ >= 700
+  switch (success_order) {
+    case std::memory_order_seq_cst:
+      // sequentially consistent is just an acquire with a seq_cst fence
+      kokkos_cuda_internal_fence_seq_cst();
+      kokkos_cuda_internal_cas_acquire_32((T*)dest, old, old_tmp, tmp);
+      break;
+    case std::memory_order_acquire:
+      kokkos_cuda_internal_cas_acquire_32((T*)dest, old, old_tmp, tmp);
+      break;
+    case std::memory_order_consume:
+      // same as acquire on PTX compatible platforms
+      kokkos_cuda_internal_cas_acquire_32((T*)dest, old, old_tmp, tmp);
+      break;
+    case std::memory_order_acq_rel:
+      kokkos_cuda_internal_cas_acq_rel_32((T*)dest, old, old_tmp, tmp);
+      break;
+    case std::memory_order_release:
+      kokkos_cuda_internal_cas_release_32((T*)dest, old, old_tmp, tmp);
+      break;
+    case std::memory_order_relaxed:
+      kokkos_cuda_internal_cas_relaxed_32((T*)dest, old, old_tmp, tmp);
+      break;
+  };
+#else
+  // All of the orders that require a fence before the relaxed atomic operation:
+  if (success_order == std::memory_order_release ||
+      success_order == std::memory_order_acq_rel) {
+    kokkos_cuda_internal_fence_acq_rel();
+  } else if (success_order == std::memory_order_seq_cst) {
+    kokkos_cuda_internal_fence_seq_cst();
+  }
+  // This is relaxed:
+  // Cuda API requires casting away volatile
+  atomicCAS((T*)dest, old_tmp, tmp);
+#endif
+  bool const rv = (old == old_tmp);
+#if __CUDA_ARCH__ < 700
+  if (rv) {
+    if (success_order == std::memory_order_acquire ||
+        success_order == std::memory_order_consume ||
+        success_order == std::memory_order_acq_rel) {
+      kokkos_cuda_internal_fence_acq_rel();
+    } else if (success_order == std::memory_order_seq_cst) {
+      kokkos_cuda_internal_fence_seq_cst();
+    }
+  } else {
+    if (failure_order == std::memory_order_acquire ||
+        failure_order == std::memory_order_consume ||
+        failure_order == std::memory_order_acq_rel) {
+      kokkos_cuda_internal_fence_acq_rel();
+    } else if (failure_order == std::memory_order_seq_cst) {
+      kokkos_cuda_internal_fence_seq_cst();
+    }
+  }
+#endif
+  memcpy(expected, &old, sizeof(T));
+  return rv;
+}
+
+// 64-bit version
+template <class T, std::enable_if_t<sizeof(T) == 8, int> = 0>
+bool atomic_compare_exchange_weak(
+    T volatile* const dest, T* const expected, T const desired,
+    std::memory_order success_order = std::memory_order_seq_cst,
+    std::memory_order failure_order = std::memory_order_seq_cst) {
+  // TODO assert that success_order >= failure_order
+  // See: https://github.com/ogiroux/freestanding
+  int64_t tmp = 0;
+  int64_t old = 0;
+  memcpy(&tmp, &desired, sizeof(T));
+  memcpy(&old, expected, sizeof(T));
+  int64_t old_tmp = old;
+#if __CUDA_ARCH__ >= 700
+  switch (success_order) {
+    case std::memory_order_seq_cst:
+      // sequentially consistent is just an acquire with a seq_cst fence
+      kokkos_cuda_internal_fence_seq_cst();
+      kokkos_cuda_internal_cas_acquire_64((T*)dest, old, old_tmp, tmp);
+      break;
+    case std::memory_order_acquire:
+      kokkos_cuda_internal_cas_acquire_64((T*)dest, old, old_tmp, tmp);
+      break;
+    case std::memory_order_consume:
+      // same as acquire on PTX compatible platforms
+      kokkos_cuda_internal_cas_acquire_64((T*)dest, old, old_tmp, tmp);
+      break;
+    case std::memory_order_acq_rel:
+      kokkos_cuda_internal_cas_acq_rel_64((T*)dest, old, old_tmp, tmp);
+      break;
+    case std::memory_order_release:
+      kokkos_cuda_internal_cas_release_64((T*)dest, old, old_tmp, tmp);
+      break;
+    case std::memory_order_relaxed:
+      kokkos_cuda_internal_cas_relaxed_64((T*)dest, old, old_tmp, tmp);
+      break;
+  };
+#else
+  // Cuda API requires casting away volatile
+  atomicCAS((T*)dest, old_tmp, tmp);
+#endif
+  bool const rv = (old == old_tmp);
+  memcpy(expected, &old, sizeof(T));
+  return rv;
+}
+
+#endif  // defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+
+#endif  // defined( KOKKOS_ENABLE_CUDA )
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+// GCC native CAS supports int, long, unsigned int, unsigned long.
+// Intel native CAS support int and long with the same interface as GCC.
+#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
+
+inline int atomic_compare_exchange(volatile int* const dest, const int compare,
+                                   const int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_val_compare_and_swap(dest, compare, val);
+}
+
+inline long atomic_compare_exchange(volatile long* const dest,
+                                    const long compare, const long val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_val_compare_and_swap(dest, compare, val);
+}
+
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS)
+
+// GCC supports unsigned
+
+inline unsigned int atomic_compare_exchange(volatile unsigned int* const dest,
+                                            const unsigned int compare,
+                                            const unsigned int val) {
+  return __sync_val_compare_and_swap(dest, compare, val);
+}
+
+inline unsigned long atomic_compare_exchange(volatile unsigned long* const dest,
+                                             const unsigned long compare,
+                                             const unsigned long val) {
+  return __sync_val_compare_and_swap(dest, compare, val);
+}
+
+inline unsigned long long atomic_compare_exchange(
+    volatile unsigned long long* const dest, const unsigned long long compare,
+    const unsigned long long val) {
+  return __sync_val_compare_and_swap(dest, compare, val);
+}
+
+#endif
+
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) == sizeof(int), const T&> val) {
+  union U {
+    int i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } tmp;
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  tmp.i =
+      __sync_val_compare_and_swap((int*)dest, *((int*)&compare), *((int*)&val));
+  return tmp.t;
+}
+
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) == sizeof(long),
+                     const T&>
+        val) {
+  union U {
+    long i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } tmp;
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  tmp.i = __sync_val_compare_and_swap((long*)dest, *((long*)&compare),
+                                      *((long*)&val));
+  return tmp.t;
+}
+
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long) &&
+                         sizeof(T) == sizeof(Impl::cas128_t),
+                     const T&>
+        val) {
+  union U {
+    Impl::cas128_t i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } tmp;
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  tmp.i = Impl::cas128((Impl::cas128_t*)dest, *((Impl::cas128_t*)&compare),
+                       *((Impl::cas128_t*)&val));
+  return tmp.t;
+}
+#endif
+
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T compare,
+    std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8)
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+                         && (sizeof(T) != 16)
+#endif
+                         ,
+                     const T>& val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  while (!Impl::lock_address_host_space((void*)dest))
+    ;
+  Kokkos::memory_fence();
+  T return_val = *dest;
+  if (return_val == compare) {
+    // Don't use the following line of code here:
+    //
+    // const T tmp = *dest = val;
+    //
+    // Instead, put each assignment in its own statement.  This is
+    // because the overload of T::operator= for volatile *this should
+    // return void, not volatile T&.  See Kokkos #177:
+    //
+    // https://github.com/kokkos/kokkos/issues/177
+    *dest       = val;
+    const T tmp = *dest;
+#ifndef KOKKOS_COMPILER_CLANG
+    (void)tmp;
+#endif
+    Kokkos::memory_fence();
+  }
+  Impl::unlock_address_host_space((void*)dest);
+  return return_val;
+}
+//----------------------------------------------------------------------------
+
+#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_compare_exchange(volatile T* const dest,
+                                                 const T compare, const T val) {
+  T retval;
+#pragma omp critical
+  {
+    retval = dest[0];
+    if (retval == compare) dest[0] = val;
+  }
+  return retval;
+}
+
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_compare_exchange(volatile T* const dest_v,
+                                                 const T compare, const T val) {
+  T* dest  = const_cast<T*>(dest_v);
+  T retval = *dest;
+  if (retval == compare) *dest = val;
+  return retval;
+}
+
+#endif
+#endif
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION bool atomic_compare_exchange_strong(
+    volatile T* const dest, const T compare, const T val) {
+  return compare == atomic_compare_exchange(dest, compare, val);
+}
+//----------------------------------------------------------------------------
+
+}  // namespace Kokkos
+
+#endif
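A device-side sketch of the C++-style weak CAS above (not part of the bundled file; this interface exists only in the CUDA paths guarded in this header). On failure the observed value is written back through `expected`, so the canonical retry loop needs no separate reload:

    #include <atomic>  // std::memory_order

    __device__ void increment(volatile int* p) {
      int expected = *p;
      // Retry until the CAS succeeds; each failure refreshes `expected`
      // with the value actually found at *p.
      while (!Kokkos::atomic_compare_exchange_weak(
          p, &expected, expected + 1, std::memory_order_relaxed,
          std::memory_order_relaxed)) {
      }
    }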
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Decrement.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Decrement.hpp
new file mode 100644 (file)
index 0000000..47961b5
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+#include <xmmintrin.h>
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_DECREMENT_HPP)
+#define KOKKOS_ATOMIC_DECREMENT_HPP
+
+#include "impl/Kokkos_Atomic_Fetch_Sub.hpp"
+
+namespace Kokkos {
+
+// Atomic decrement
+template <>
+KOKKOS_INLINE_FUNCTION void atomic_decrement<char>(volatile char* a) {
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
+    !defined(_WIN32) && !defined(__CUDA_ARCH__)
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)a, _MM_HINT_ET0);
+#endif
+  __asm__ __volatile__("lock decb %0"
+                       : /* no output registers */
+                       : "m"(a[0])
+                       : "memory");
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+  char* a_nv = const_cast<char*>(a);
+  --(*a_nv);
+#else
+  Kokkos::atomic_fetch_sub(a, char(1));
+#endif
+}
+
+template <>
+KOKKOS_INLINE_FUNCTION void atomic_decrement<short>(volatile short* a) {
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
+    !defined(_WIN32) && !defined(__CUDA_ARCH__)
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)a, _MM_HINT_ET0);
+#endif
+  __asm__ __volatile__("lock decw %0"
+                       : /* no output registers */
+                       : "m"(a[0])
+                       : "memory");
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+  short* a_nv = const_cast<short*>(a);
+  --(*a_nv);
+#else
+  Kokkos::atomic_fetch_sub(a, short(1));
+#endif
+}
+
+template <>
+KOKKOS_INLINE_FUNCTION void atomic_decrement<int>(volatile int* a) {
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
+    !defined(_WIN32) && !defined(__CUDA_ARCH__)
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)a, _MM_HINT_ET0);
+#endif
+  __asm__ __volatile__("lock decl %0"
+                       : /* no output registers */
+                       : "m"(a[0])
+                       : "memory");
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+  int* a_nv = const_cast<int*>(a);
+  --(*a_nv);
+#else
+  Kokkos::atomic_fetch_sub(a, int(1));
+#endif
+}
+
+template <>
+KOKKOS_INLINE_FUNCTION void atomic_decrement<long long int>(
+    volatile long long int* a) {
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
+    !defined(_WIN32) && !defined(__CUDA_ARCH__)
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)a, _MM_HINT_ET0);
+#endif
+  __asm__ __volatile__("lock decq %0"
+                       : /* no output registers */
+                       : "m"(a[0])
+                       : "memory");
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+  long long int* a_nv = const_cast<long long int*>(a);
+  --(*a_nv);
+#else
+  using T = long long int;
+  Kokkos::atomic_fetch_sub(a, T(1));
+#endif
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION void atomic_decrement(volatile T* a) {
+#if defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+  T* a_nv = const_cast<T*>(a);
+  --(*a_nv);
+#else
+  Kokkos::atomic_fetch_sub(a, T(1));
+#endif
+}
+
+}  // End of namespace Kokkos
+#endif
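A sketch of the specializations above in use (not part of the bundled file): char, short, int, and long long lower to a single locked decrement on x86-64 host builds, while every other type falls through to the generic template and atomic_fetch_sub(a, T(1)):

    int refcount = 2;
    Kokkos::atomic_decrement(&refcount);  // "lock decl" on x86-64 hosts

    long long big = 10;
    Kokkos::atomic_decrement(&big);       // "lock decq"

    double d = 1.0;                       // no specialization for double:
    Kokkos::atomic_decrement(&d);         // generic path, atomic_fetch_sub(&d, 1.0)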
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Exchange.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Exchange.hpp
new file mode 100644 (file)
index 0000000..a8f77d8
--- /dev/null
@@ -0,0 +1,404 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+#include <xmmintrin.h>
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_EXCHANGE_HPP)
+#define KOKKOS_ATOMIC_EXCHANGE_HPP
+
+namespace Kokkos {
+
+//----------------------------------------------------------------------------
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+
+__inline__ __device__ int atomic_exchange(volatile int* const dest,
+                                          const int val) {
+  // return __iAtomicExch( (int*) dest , val );
+  return atomicExch((int*)dest, val);
+}
+
+__inline__ __device__ unsigned int atomic_exchange(
+    volatile unsigned int* const dest, const unsigned int val) {
+  // return __uAtomicExch( (unsigned int*) dest , val );
+  return atomicExch((unsigned int*)dest, val);
+}
+
+__inline__ __device__ unsigned long long int atomic_exchange(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  // return __ullAtomicExch( (unsigned long long*) dest , val );
+  return atomicExch((unsigned long long*)dest, val);
+}
+
+/** \brief  Atomic exchange for any type with compatible size */
+template <typename T>
+__inline__ __device__ T
+atomic_exchange(volatile T* const dest,
+                std::enable_if_t<sizeof(T) == sizeof(int), const T&> val) {
+  // int tmp = __ullAtomicExch( (int*) dest , *((int*)&val) );
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  int tmp = atomicExch(((int*)dest), *((int*)&val));
+  return *((T*)&tmp);
+}
+
+template <typename T>
+__inline__ __device__ T atomic_exchange(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) &&
+                         sizeof(T) == sizeof(unsigned long long int),
+                     const T&>
+        val) {
+  using type = unsigned long long int;
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  // type tmp = __ullAtomicExch( (type*) dest , *((type*)&val) );
+  type tmp = atomicExch(((type*)dest), *((type*)&val));
+  return *((T*)&tmp);
+}
+
+template <typename T>
+__inline__ __device__ T atomic_exchange(
+    volatile T* const dest,
+    std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8), const T>& val) {
+  T return_val;
+  // This is a way to (hopefully) avoid deadlock in a warp
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  int done                 = 0;
+  unsigned int mask        = __activemask();
+  unsigned int active      = __ballot_sync(mask, 1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_cuda_space((void*)dest)) {
+        Kokkos::memory_fence();
+        return_val = *dest;
+        *dest      = val;
+        Kokkos::memory_fence();
+        Impl::unlock_address_cuda_space((void*)dest);
+        done = 1;
+      }
+    }
+    done_active = __ballot_sync(mask, done);
+  }
+  return return_val;
+}
+/** \brief  Atomic exchange for any type with compatible size */
+template <typename T>
+__inline__ __device__ void atomic_assign(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) == sizeof(int), const T&> val) {
+  // (void) __ullAtomicExch( (int*) dest , *((int*)&val) );
+  (void)atomicExch(((int*)dest), *((int*)&val));
+}
+
+template <typename T>
+__inline__ __device__ void atomic_assign(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) &&
+                         sizeof(T) == sizeof(unsigned long long int),
+                     const T&>
+        val) {
+  using type = unsigned long long int;
+  // (void) __ullAtomicExch( (type*) dest , *((type*)&val) );
+  (void)atomicExch(((type*)dest), *((type*)&val));
+}
+
+template <typename T>
+__inline__ __device__ void atomic_assign(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) &&
+                         sizeof(T) != sizeof(unsigned long long int),
+                     const T&>
+        val) {
+  (void)atomic_exchange(dest, val);
+}
+
+#endif
+#endif
+
+//----------------------------------------------------------------------------
+
+#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
+
+template <typename T>
+inline T atomic_exchange(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) == sizeof(int) || sizeof(T) == sizeof(long),
+                     const T&>
+        val) {
+  using type = std::conditional_t<sizeof(T) == sizeof(int), int, long>;
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  const type v = *((type*)&val);  // Extract to be sure the value doesn't change
+
+  type assumed;
+
+  union U {
+    T val_T;
+    type val_type;
+    inline U() {}
+  } old;
+
+  old.val_T = *dest;
+
+  do {
+    assumed = old.val_type;
+    old.val_type =
+        __sync_val_compare_and_swap((volatile type*)dest, assumed, v);
+  } while (assumed != old.val_type);
+
+  return old.val_T;
+}
+
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+template <typename T>
+inline T atomic_exchange(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) == sizeof(Impl::cas128_t), const T&> val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  union U {
+    Impl::cas128_t i;
+    T t;
+    inline U() {}
+  } assume, oldval, newval;
+
+  oldval.t = *dest;
+  newval.t = val;
+
+  do {
+    assume.i = oldval.i;
+    oldval.i = Impl::cas128((volatile Impl::cas128_t*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+#endif
+
+//----------------------------------------------------------------------------
+
+template <typename T>
+inline T atomic_exchange(volatile T* const dest,
+                         std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8)
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+                                              && (sizeof(T) != 16)
+#endif
+                                              ,
+                                          const T>& val) {
+  while (!Impl::lock_address_host_space((void*)dest))
+    ;
+  Kokkos::memory_fence();
+  T return_val = *dest;
+  // Don't use the following line of code here:
+  //
+  // const T tmp = *dest = val;
+  //
+  // Instead, put each assignment in its own statement.  This is
+  // because the overload of T::operator= for volatile *this should
+  // return void, not volatile T&.  See Kokkos #177:
+  //
+  // https://github.com/kokkos/kokkos/issues/177
+  *dest       = val;
+  const T tmp = *dest;
+#ifndef KOKKOS_COMPILER_CLANG
+  (void)tmp;
+#endif
+  Kokkos::memory_fence();
+  Impl::unlock_address_host_space((void*)dest);
+  return return_val;
+}
+
+template <typename T>
+inline void atomic_assign(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) == sizeof(int) || sizeof(T) == sizeof(long),
+                     const T&>
+        val) {
+  using type = std::conditional_t<sizeof(T) == sizeof(int), int, long>;
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  const type v = *((type*)&val);  // Extract to be sure the value doesn't change
+
+  type assumed;
+
+  union U {
+    T val_T;
+    type val_type;
+    inline U() {}
+  } old;
+
+  old.val_T = *dest;
+
+  do {
+    assumed = old.val_type;
+    old.val_type =
+        __sync_val_compare_and_swap((volatile type*)dest, assumed, v);
+  } while (assumed != old.val_type);
+}
+
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+template <typename T>
+inline void atomic_assign(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) == sizeof(Impl::cas128_t), const T&> val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  union U {
+    Impl::cas128_t i;
+    T t;
+    inline U() {}
+  } assume, oldval, newval;
+
+  oldval.t = *dest;
+  newval.t = val;
+  do {
+    assume.i = oldval.i;
+    oldval.i = Impl::cas128((volatile Impl::cas128_t*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+}
+#endif
+
+template <typename T>
+inline void atomic_assign(volatile T* const dest,
+                          std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8)
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+                                               && (sizeof(T) != 16)
+#endif
+                                               ,
+                                           const T>& val) {
+  while (!Impl::lock_address_host_space((void*)dest))
+    ;
+  Kokkos::memory_fence();
+  // This is likely an aggregate type with a defined
+  // 'volatile T & operator = ( const T & ) volatile'
+  // member.  The volatile return value implicitly defines a
+  // dereference that some compilers (gcc 4.7.2) warn is being ignored.
+  // Suppress warning by casting return to void.
+  //(void)( *dest = val );
+  *dest = val;
+  Kokkos::memory_fence();
+  Impl::unlock_address_host_space((void*)dest);
+}
+//----------------------------------------------------------------------------
+
+#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
+
+template <typename T>
+inline T atomic_exchange(volatile T* const dest, const T val) {
+  T retval;
+  //#pragma omp atomic capture
+#pragma omp critical
+  {
+    retval  = dest[0];
+    dest[0] = val;
+  }
+  return retval;
+}
+
+template <typename T>
+inline void atomic_assign(volatile T* const dest, const T val) {
+  //#pragma omp atomic
+#pragma omp critical
+  { dest[0] = val; }
+}
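+// A critical section is used above instead of "#pragma omp atomic",
+// presumably because T may be a non-scalar type that OpenMP atomic
+// directives cannot handle; the commented-out pragmas record the
+// intended scalar forms.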
+
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+
+template <typename T>
+inline T atomic_exchange(volatile T* const dest_v, const T val) {
+  T* dest  = const_cast<T*>(dest_v);
+  T retval = *dest;
+  *dest    = val;
+  return retval;
+}
+
+template <typename T>
+inline void atomic_assign(volatile T* const dest_v, const T val) {
+  T* dest = const_cast<T*>(dest_v);
+  *dest   = val;
+}
+
+#endif
+#endif
+
+// dummy for non-CUDA Kokkos headers being processed by NVCC
+#if defined(__CUDA_ARCH__) && !defined(KOKKOS_ENABLE_CUDA)
+template <typename T>
+__inline__ __device__ T atomic_exchange(volatile T* const,
+                                        const Kokkos::Impl::identity_t<T>) {
+  return T();
+}
+
+template <typename T>
+__inline__ __device__ void atomic_assign(volatile T* const,
+                                         const Kokkos::Impl::identity_t<T>) {}
+#endif
+
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Fetch_Add.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Fetch_Add.hpp
new file mode 100644 (file)
index 0000000..c188f45
--- /dev/null
@@ -0,0 +1,389 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+#include <xmmintrin.h>
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_FETCH_ADD_HPP)
+#define KOKKOS_ATOMIC_FETCH_ADD_HPP
+
+namespace Kokkos {
+
+//----------------------------------------------------------------------------
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+
+// Support for int, unsigned int, unsigned long long int, float, and
+// (on compute capability 6.0+) double
+
+__inline__ __device__ int atomic_fetch_add(volatile int* const dest,
+                                           const int val) {
+  return atomicAdd((int*)dest, val);
+}
+
+__inline__ __device__ unsigned int atomic_fetch_add(
+    volatile unsigned int* const dest, const unsigned int val) {
+  return atomicAdd((unsigned int*)dest, val);
+}
+
+__inline__ __device__ unsigned long long int atomic_fetch_add(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return atomicAdd((unsigned long long int*)dest, val);
+}
+
+__inline__ __device__ float atomic_fetch_add(volatile float* const dest,
+                                             const float val) {
+  return atomicAdd((float*)dest, val);
+}
+
+#if (600 <= __CUDA_ARCH__)
+__inline__ __device__ double atomic_fetch_add(volatile double* const dest,
+                                              const double val) {
+  return atomicAdd((double*)dest, val);
+}
+#endif
+
+template <typename T>
+__inline__ __device__ T
+atomic_fetch_add(volatile T* const dest,
+                 std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
+  // To work around a bug in the Clang CUDA compiler, the union name here
+  // needs to differ from the one used in the other overloads.
+  union U1 {
+    int i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U1() {}
+  } assume, oldval, newval;
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t + val;
+    oldval.i = atomicCAS((int*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
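+// This is the standard CUDA CAS loop: reinterpret the 4-byte T as int,
+// compute the updated value in a register, and let atomicCAS publish it
+// only if *dest still holds the value that was read.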
+
+template <typename T>
+__inline__ __device__ T atomic_fetch_add(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) &&
+                         sizeof(T) == sizeof(unsigned long long int),
+                     const T>
+        val) {
+  // To work around a bug in the Clang CUDA compiler, the union name here
+  // needs to differ from the one used in the other overloads.
+  union U2 {
+    unsigned long long int i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U2() {}
+  } assume, oldval, newval;
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t + val;
+    oldval.i = atomicCAS((unsigned long long int*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+//----------------------------------------------------------------------------
+
+template <typename T>
+__inline__ __device__ T atomic_fetch_add(
+    volatile T* const dest,
+    std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8), const T>& val) {
+  T return_val;
+  // This is a way to (hopefully) avoid deadlock within a warp
+  int done                 = 0;
+  unsigned int mask        = __activemask();
+  unsigned int active      = __ballot_sync(mask, 1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      bool locked = Impl::lock_address_cuda_space((void*)dest);
+      if (locked) {
+        Kokkos::memory_fence();
+        return_val = *dest;
+        *dest      = return_val + val;
+        Kokkos::memory_fence();
+        Impl::unlock_address_cuda_space((void*)dest);
+        done = 1;
+      }
+    }
+
+    done_active = __ballot_sync(mask, done);
+  }
+  return return_val;
+}
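+// Why the ballot loop: threads of a warp execute in lockstep, so a naive
+// "spin until the lock is free" could deadlock if the lock holder and a
+// spinning thread share a warp.  Every active thread instead keeps
+// retrying until __ballot_sync reports that all of them have finished.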
+#endif
+#endif
+//----------------------------------------------------------------------------
+#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
+
+#if defined(KOKKOS_ENABLE_ASM) && (defined(KOKKOS_ENABLE_ISA_X86_64) || \
+                                   defined(KOKKOS_KNL_USE_ASM_WORKAROUND))
+inline int atomic_fetch_add(volatile int* dest, const int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  int original = val;
+
+  __asm__ __volatile__("lock xadd %1, %0"
+                       : "+m"(*dest), "+r"(original)
+                       : "m"(*dest), "r"(original)
+                       : "memory");
+
+  return original;
+}
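+// "lock xadd %1, %0" atomically adds the register operand to *dest and
+// leaves the previous value of *dest in the register, which matches the
+// fetch-add return convention without needing a CAS loop.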
+#else
+inline int atomic_fetch_add(volatile int* const dest, const int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_add(dest, val);
+}
+#endif
+
+inline long int atomic_fetch_add(volatile long int* const dest,
+                                 const long int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_add(dest, val);
+}
+
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS)
+
+inline unsigned int atomic_fetch_add(volatile unsigned int* const dest,
+                                     const unsigned int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_add(dest, val);
+}
+
+inline unsigned long int atomic_fetch_add(
+    volatile unsigned long int* const dest, const unsigned long int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_add(dest, val);
+}
+
+inline unsigned long long int atomic_fetch_add(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_add(dest, val);
+}
+
+#endif
+
+template <typename T>
+inline T atomic_fetch_add(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
+  union U {
+    int i;
+    T t;
+    inline U() {}
+  } assume, oldval, newval;
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t + val;
+    oldval.i = __sync_val_compare_and_swap((int*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+template <typename T>
+inline T atomic_fetch_add(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) == sizeof(long),
+                     const T>
+        val) {
+  union U {
+    long i;
+    T t;
+    inline U() {}
+  } assume, oldval, newval;
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t + val;
+    oldval.i = __sync_val_compare_and_swap((long*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+template <typename T>
+inline T atomic_fetch_add(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) != sizeof(long) &&
+                         sizeof(T) == sizeof(Impl::cas128_t),
+                     const T>
+        val) {
+  union U {
+    Impl::cas128_t i;
+    T t;
+    inline U() {}
+  } assume, oldval, newval;
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t + val;
+    oldval.i = Impl::cas128((volatile Impl::cas128_t*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+#endif
+
+//----------------------------------------------------------------------------
+
+template <typename T>
+inline T atomic_fetch_add(volatile T* const dest,
+                          std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8)
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+                                               && (sizeof(T) != 16)
+#endif
+                                               ,
+                                           const T>& val) {
+  while (!Impl::lock_address_host_space((void*)dest))
+    ;
+  Kokkos::memory_fence();
+  T return_val = *dest;
+
+  // Don't use the following line of code here:
+  //
+  // const T tmp = *dest = return_val + val;
+  //
+  // Instead, put each assignment in its own statement.  This is
+  // because the overload of T::operator= for volatile *this should
+  // return void, not volatile T&.  See Kokkos #177:
+  //
+  // https://github.com/kokkos/kokkos/issues/177
+  *dest       = return_val + val;
+  const T tmp = *dest;
+  (void)tmp;
+  Kokkos::memory_fence();
+  Impl::unlock_address_host_space((void*)dest);
+
+  return return_val;
+}
+//----------------------------------------------------------------------------
+
+#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
+
+template <typename T>
+T atomic_fetch_add(volatile T* const dest, const T val) {
+  T retval;
+#pragma omp atomic capture
+  {
+    retval = dest[0];
+    dest[0] += val;
+  }
+  return retval;
+}
+
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+
+template <typename T>
+T atomic_fetch_add(volatile T* const dest_v, std::add_const_t<T> val) {
+  T* dest  = const_cast<T*>(dest_v);
+  T retval = *dest;
+  *dest += val;
+  return retval;
+}
+
+#endif
+#endif
+//----------------------------------------------------------------------------
+
+// dummy for non-CUDA Kokkos headers being processed by NVCC
+#if defined(__CUDA_ARCH__) && !defined(KOKKOS_ENABLE_CUDA)
+template <typename T>
+__inline__ __device__ T atomic_fetch_add(volatile T* const,
+                                         Kokkos::Impl::identity_t<T>) {
+  return T();
+}
+#endif
+
+}  // namespace Kokkos
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Fetch_And.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Fetch_And.hpp
new file mode 100644 (file)
index 0000000..50bd2b0
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+#include <xmmintrin.h>
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_FETCH_AND_HPP)
+#define KOKKOS_ATOMIC_FETCH_AND_HPP
+
+namespace Kokkos {
+
+//----------------------------------------------------------------------------
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+
+// Support for int, unsigned int, and unsigned long long int
+
+__inline__ __device__ int atomic_fetch_and(volatile int* const dest,
+                                           const int val) {
+  return atomicAnd((int*)dest, val);
+}
+
+__inline__ __device__ unsigned int atomic_fetch_and(
+    volatile unsigned int* const dest, const unsigned int val) {
+  return atomicAnd((unsigned int*)dest, val);
+}
+
+#if defined(__CUDA_ARCH__) && (350 <= __CUDA_ARCH__)
+__inline__ __device__ unsigned long long int atomic_fetch_and(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return atomicAnd((unsigned long long int*)dest, val);
+}
+#endif
+#endif
+#endif
+
+// 08/05/20 Overload to work around https://bugs.llvm.org/show_bug.cgi?id=46922
+
+#if (defined(KOKKOS_ENABLE_CUDA) &&                   \
+     (defined(__CUDA_ARCH__) ||                       \
+      defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND))) || \
+    (defined(KOKKOS_ENABLE_HIP))
+__inline__ __device__ unsigned long atomic_fetch_and(
+    volatile unsigned long* const dest, const unsigned long val) {
+  return atomic_fetch_and<unsigned long>(dest, val);
+}
+__inline__ __device__ long atomic_fetch_and(volatile long* const dest,
+                                            long val) {
+  return atomic_fetch_and<long>(dest, val);
+}
+#endif
+
+//----------------------------------------------------------------------------
+#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
+
+inline int atomic_fetch_and(volatile int* const dest, const int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_and(dest, val);
+}
+
+inline long int atomic_fetch_and(volatile long int* const dest,
+                                 const long int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_and(dest, val);
+}
+
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS)
+
+inline unsigned int atomic_fetch_and(volatile unsigned int* const dest,
+                                     const unsigned int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_and(dest, val);
+}
+
+inline unsigned long int atomic_fetch_and(
+    volatile unsigned long int* const dest, const unsigned long int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_and(dest, val);
+}
+
+inline unsigned long long int atomic_fetch_and(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_and(dest, val);
+}
+
+#endif
+
+//----------------------------------------------------------------------------
+
+#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
+
+template <typename T>
+T atomic_fetch_and(volatile T* const dest, const T val) {
+  T retval;
+#pragma omp atomic capture
+  {
+    retval = dest[0];
+    dest[0] &= val;
+  }
+  return retval;
+}
+
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+
+template <typename T>
+T atomic_fetch_and(volatile T* const dest_v, const T val) {
+  T* dest  = const_cast<T*>(dest_v);
+  T retval = *dest;
+  *dest &= val;
+  return retval;
+}
+
+#endif
+#endif
+//----------------------------------------------------------------------------
+
+// dummy for non-CUDA Kokkos headers being processed by NVCC
+#if defined(__CUDA_ARCH__) && !defined(KOKKOS_ENABLE_CUDA)
+template <typename T>
+__inline__ __device__ T atomic_fetch_and(volatile T* const,
+                                         Kokkos::Impl::identity_t<T>) {
+  return T();
+}
+#endif
+
+// Simpler version of atomic_fetch_and without the fetch
+template <typename T>
+KOKKOS_INLINE_FUNCTION void atomic_and(volatile T* const dest, const T src) {
+  (void)atomic_fetch_and(dest, src);
+}
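+// A hedged usage sketch: atomically clearing flag bits in a shared mask
+// (mask is a hypothetical variable visible to several threads):
+//
+//   unsigned int mask = 0xFFu;
+//   Kokkos::atomic_and(&mask, ~0x2u);  // clears bit 1, keeps the rest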
+
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Fetch_Or.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Fetch_Or.hpp
new file mode 100644 (file)
index 0000000..7a04a8c
--- /dev/null
@@ -0,0 +1,193 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+#include <xmmintrin.h>
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_FETCH_OR_HPP)
+#define KOKKOS_ATOMIC_FETCH_OR_HPP
+
+namespace Kokkos {
+
+//----------------------------------------------------------------------------
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+
+// Support for int, unsigned int, and unsigned long long int
+
+__inline__ __device__ int atomic_fetch_or(volatile int* const dest,
+                                          const int val) {
+  return atomicOr((int*)dest, val);
+}
+
+__inline__ __device__ unsigned int atomic_fetch_or(
+    volatile unsigned int* const dest, const unsigned int val) {
+  return atomicOr((unsigned int*)dest, val);
+}
+
+#if defined(__CUDA_ARCH__) && (350 <= __CUDA_ARCH__)
+__inline__ __device__ unsigned long long int atomic_fetch_or(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return atomicOr((unsigned long long int*)dest, val);
+}
+#endif
+#endif
+#endif
+
+// 08/05/20 Overload to work around https://bugs.llvm.org/show_bug.cgi?id=46922
+
+#if (defined(KOKKOS_ENABLE_CUDA) &&                   \
+     (defined(__CUDA_ARCH__) ||                       \
+      defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND))) || \
+    (defined(KOKKOS_ENABLE_HIP))
+__inline__ __device__ unsigned long atomic_fetch_or(
+    volatile unsigned long* const dest, const unsigned long val) {
+  return atomic_fetch_or<unsigned long>(dest, val);
+}
+
+__inline__ __device__ long atomic_fetch_or(volatile long* const dest,
+                                           long val) {
+  return atomic_fetch_or<long>(dest, val);
+}
+#endif
+
+//----------------------------------------------------------------------------
+#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
+
+inline int atomic_fetch_or(volatile int* const dest, const int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_or(dest, val);
+}
+
+inline long int atomic_fetch_or(volatile long int* const dest,
+                                const long int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_or(dest, val);
+}
+
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS)
+
+inline unsigned int atomic_fetch_or(volatile unsigned int* const dest,
+                                    const unsigned int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_or(dest, val);
+}
+
+inline unsigned long int atomic_fetch_or(volatile unsigned long int* const dest,
+                                         const unsigned long int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_or(dest, val);
+}
+
+inline unsigned long long int atomic_fetch_or(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_or(dest, val);
+}
+
+#endif
+
+//----------------------------------------------------------------------------
+
+#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
+
+template <typename T>
+T atomic_fetch_or(volatile T* const dest, const T val) {
+  T retval;
+#pragma omp atomic capture
+  {
+    retval = dest[0];
+    dest[0] |= val;
+  }
+  return retval;
+}
+
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+
+template <typename T>
+T atomic_fetch_or(volatile T* const dest_v, const T val) {
+  T* dest  = const_cast<T*>(dest_v);
+  T retval = *dest;
+  *dest |= val;
+  return retval;
+}
+
+#endif
+#endif
+//----------------------------------------------------------------------------
+
+// dummy for non-CUDA Kokkos headers being processed by NVCC
+#if defined(__CUDA_ARCH__) && !defined(KOKKOS_ENABLE_CUDA)
+template <typename T>
+__inline__ __device__ T atomic_fetch_or(volatile T* const,
+                                        Kokkos::Impl::identity_t<T>) {
+  return T();
+}
+#endif
+
+// Simpler version of atomic_fetch_or without the fetch
+template <typename T>
+KOKKOS_INLINE_FUNCTION void atomic_or(volatile T* const dest, const T src) {
+  (void)atomic_fetch_or(dest, src);
+}
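+// A hedged usage sketch, mirroring atomic_and above (mask is a
+// hypothetical shared variable):
+//
+//   unsigned int mask = 0x1u;
+//   Kokkos::atomic_or(&mask, 0x4u);  // sets bit 2, keeps the rest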
+
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Fetch_Sub.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Fetch_Sub.hpp
new file mode 100644 (file)
index 0000000..6aaf369
--- /dev/null
@@ -0,0 +1,323 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+#include <xmmintrin.h>
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_FETCH_SUB_HPP)
+#define KOKKOS_ATOMIC_FETCH_SUB_HPP
+
+namespace Kokkos {
+
+//----------------------------------------------------------------------------
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+
+// Support for int, unsigned int, int64_t, float, and
+// (on compute capability 6.0+) double
+
+__inline__ __device__ int atomic_fetch_sub(volatile int* const dest,
+                                           const int val) {
+  return atomicSub((int*)dest, val);
+}
+
+__inline__ __device__ unsigned int atomic_fetch_sub(
+    volatile unsigned int* const dest, const unsigned int val) {
+  return atomicSub((unsigned int*)dest, val);
+}
+
+__inline__ __device__ int64_t atomic_fetch_sub(volatile int64_t* const dest,
+                                               const int64_t val) {
+  return atomic_fetch_add(dest, -val);
+}
+
+__inline__ __device__ float atomic_fetch_sub(volatile float* const dest,
+                                             const float val) {
+  return atomicAdd((float*)dest, -val);
+}
+
+#if (600 <= __CUDA_ARCH__)
+__inline__ __device__ double atomic_fetch_sub(volatile double* const dest,
+                                              const double val) {
+  return atomicAdd((double*)dest, -val);
+}
+#endif
+
+template <typename T>
+__inline__ __device__ T
+atomic_fetch_sub(volatile T* const dest,
+                 std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
+  union U {
+    int i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } oldval, assume, newval;
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t - val;
+    oldval.i = atomicCAS((int*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+template <typename T>
+__inline__ __device__ T atomic_fetch_sub(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) &&
+                         sizeof(T) == sizeof(unsigned long long int),
+                     const T>
+        val) {
+  union U {
+    unsigned long long int i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } oldval, assume, newval;
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t - val;
+    oldval.i = atomicCAS((unsigned long long int*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+//----------------------------------------------------------------------------
+
+template <typename T>
+__inline__ __device__ T atomic_fetch_sub(
+    volatile T* const dest,
+    std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8), const T>& val) {
+  T return_val;
+  // This is a way to (hopefully) avoid deadlock within a warp
+  int done                 = 0;
+  unsigned int mask        = __activemask();
+  unsigned int active      = __ballot_sync(mask, 1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_cuda_space((void*)dest)) {
+        Kokkos::memory_fence();
+        return_val = *dest;
+        *dest      = return_val - val;
+        Kokkos::memory_fence();
+        Impl::unlock_address_cuda_space((void*)dest);
+        done = 1;
+      }
+    }
+    done_active = __ballot_sync(mask, done);
+  }
+  return return_val;
+}
+#endif
+#endif
+//----------------------------------------------------------------------------
+#if !defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS) || defined(KOKKOS_ENABLE_INTEL_ATOMICS)
+
+inline int atomic_fetch_sub(volatile int* const dest, const int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_sub(dest, val);
+}
+
+inline long int atomic_fetch_sub(volatile long int* const dest,
+                                 const long int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_sub(dest, val);
+}
+
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS)
+
+inline unsigned int atomic_fetch_sub(volatile unsigned int* const dest,
+                                     const unsigned int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_sub(dest, val);
+}
+
+inline unsigned long int atomic_fetch_sub(
+    volatile unsigned long int* const dest, const unsigned long int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_sub(dest, val);
+}
+
+inline unsigned long long int atomic_fetch_sub(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+  return __sync_fetch_and_sub(dest, val);
+}
+
+#endif
+
+template <typename T>
+inline T atomic_fetch_sub(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
+  union U {
+    int i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } oldval, assume, newval;
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t - val;
+    oldval.i = __sync_val_compare_and_swap((int*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+template <typename T>
+inline T atomic_fetch_sub(
+    volatile T* const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) && sizeof(T) == sizeof(long),
+                     const T>
+        val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  union U {
+    long i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } oldval, assume, newval;
+
+  oldval.t = *dest;
+
+  do {
+    assume.i = oldval.i;
+    newval.t = assume.t - val;
+    oldval.i = __sync_val_compare_and_swap((long*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+//----------------------------------------------------------------------------
+
+template <typename T>
+inline T atomic_fetch_sub(
+    volatile T* const dest,
+    std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8), const T>& val) {
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)dest, _MM_HINT_ET0);
+#endif
+
+  while (!Impl::lock_address_host_space((void*)dest))
+    ;
+  Kokkos::memory_fence();
+  T return_val = *dest;
+  *dest        = return_val - val;
+  Kokkos::memory_fence();
+  Impl::unlock_address_host_space((void*)dest);
+  return return_val;
+}
+
+//----------------------------------------------------------------------------
+
+#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
+
+template <typename T>
+T atomic_fetch_sub(volatile T* const dest, const T val) {
+  T retval;
+#pragma omp atomic capture
+  {
+    retval = dest[0];
+    dest[0] -= val;
+  }
+  return retval;
+}
+
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+
+template <typename T>
+T atomic_fetch_sub(volatile T* const dest_v, const T val) {
+  T* dest  = const_cast<T*>(dest_v);
+  T retval = *dest;
+  *dest -= val;
+  return retval;
+}
+
+#endif
+#endif
+
+// dummy for non-CUDA Kokkos headers being processed by NVCC
+#if defined(__CUDA_ARCH__) && !defined(KOKKOS_ENABLE_CUDA)
+template <typename T>
+__inline__ __device__ T atomic_fetch_sub(volatile T* const,
+                                         Kokkos::Impl::identity_t<T>) {
+  return T();
+}
+#endif
+
+}  // namespace Kokkos
+
+#include <impl/Kokkos_Atomic_Assembly.hpp>
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Generic.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Generic.hpp
new file mode 100644 (file)
index 0000000..aac0d12
--- /dev/null
@@ -0,0 +1,555 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_GENERIC_HPP)
+#define KOKKOS_ATOMIC_GENERIC_HPP
+
+// Combination operators to be used in a compare-and-exchange based atomic
+// operation
+namespace Kokkos {
+namespace Impl {
+
+template <class Op, class Scalar1, class Scalar2, class Enable = bool>
+struct _check_early_exit_impl {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static constexpr bool check(Op const&, Scalar1 const&,
+                              Scalar2 const&) noexcept {
+    return false;
+  }
+};
+
+template <class Op, class Scalar1, class Scalar2>
+struct _check_early_exit_impl<
+    Op, Scalar1, Scalar2,
+    decltype(std::declval<Op const&>().check_early_exit(
+        std::declval<Scalar1 const&>(), std::declval<Scalar2 const&>()))> {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static constexpr bool check(Op const& op, Scalar1 const& v1,
+                              Scalar2 const& v2) {
+    return op.check_early_exit(v1, v2);
+  }
+};
+
+template <class Op, class Scalar1, class Scalar2>
+KOKKOS_FORCEINLINE_FUNCTION constexpr bool check_early_exit(
+    Op const& op, Scalar1 const& v1, Scalar2 const& v2) noexcept {
+  return _check_early_exit_impl<Op, Scalar1, Scalar2>::check(op, v1, v2);
+}
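+// check_early_exit relies on expression SFINAE: the partial
+// specialization above is selected only for operators that provide a
+// bool-returning check_early_exit(v1, v2) member (MaxOper and MinOper
+// below), which lets e.g. atomic_fetch_max return immediately, without
+// any CAS, when the stored value is already larger than the operand.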
+
+template <class Scalar1, class Scalar2>
+struct MaxOper {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return (val1 > val2 ? val1 : val2);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION
+  static constexpr bool check_early_exit(Scalar1 const& val1,
+                                         Scalar2 const& val2) noexcept {
+    return (val1 > val2);
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct MinOper {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return (val1 < val2 ? val1 : val2);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION
+  static constexpr bool check_early_exit(Scalar1 const& val1,
+                                         Scalar2 const& val2) noexcept {
+    return (val1 < val2);
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct AddOper {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return val1 + val2;
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct SubOper {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return val1 - val2;
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct MulOper {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return val1 * val2;
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct DivOper {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return val1 / val2;
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct ModOper {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return val1 % val2;
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct AndOper {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return val1 & val2;
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct OrOper {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return val1 | val2;
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct XorOper {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return val1 ^ val2;
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct LShiftOper {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return val1 << val2;
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct RShiftOper {
+  KOKKOS_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return val1 >> val2;
+  }
+};
+
+template <class Oper, typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_oper(
+    const Oper& op, volatile T* const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) &&
+                         sizeof(T) == sizeof(unsigned long long int),
+                     const T>
+        val) {
+  union U {
+    unsigned long long int i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } oldval, assume, newval;
+
+  oldval.t = *dest;
+
+  do {
+    if (check_early_exit(op, oldval.t, val)) return oldval.t;
+    assume.i = oldval.i;
+    newval.t = op.apply(assume.t, val);
+    oldval.i = Kokkos::atomic_compare_exchange((unsigned long long int*)dest,
+                                               assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+template <class Oper, typename T>
+KOKKOS_INLINE_FUNCTION T atomic_oper_fetch(
+    const Oper& op, volatile T* const dest,
+    std::enable_if_t<sizeof(T) != sizeof(int) &&
+                         sizeof(T) == sizeof(unsigned long long int),
+                     const T>
+        val) {
+  union U {
+    unsigned long long int i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } oldval, assume, newval;
+
+  oldval.t = *dest;
+
+  do {
+    if (check_early_exit(op, oldval.t, val)) return oldval.t;
+    assume.i = oldval.i;
+    newval.t = op.apply(assume.t, val);
+    oldval.i = Kokkos::atomic_compare_exchange((unsigned long long int*)dest,
+                                               assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return newval.t;
+}
+
+template <class Oper, typename T>
+KOKKOS_INLINE_FUNCTION T
+atomic_fetch_oper(const Oper& op, volatile T* const dest,
+                  std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
+  union U {
+    int i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } oldval, assume, newval;
+
+  oldval.t = *dest;
+
+  do {
+    if (check_early_exit(op, oldval.t, val)) return oldval.t;
+    assume.i = oldval.i;
+    newval.t = op.apply(assume.t, val);
+    oldval.i = Kokkos::atomic_compare_exchange((int*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return oldval.t;
+}
+
+template <class Oper, typename T>
+KOKKOS_INLINE_FUNCTION T
+atomic_oper_fetch(const Oper& op, volatile T* const dest,
+                  std::enable_if_t<sizeof(T) == sizeof(int), const T> val) {
+  union U {
+    int i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } oldval, assume, newval;
+
+  oldval.t = *dest;
+
+  do {
+    if (check_early_exit(op, oldval.t, val)) return oldval.t;
+    assume.i = oldval.i;
+    newval.t = op.apply(assume.t, val);
+    oldval.i = Kokkos::atomic_compare_exchange((int*)dest, assume.i, newval.i);
+  } while (assume.i != oldval.i);
+
+  return newval.t;
+}
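+// The two families above differ only in what they return: the
+// atomic_fetch_oper overloads return oldval.t (the value before the
+// update) while the atomic_oper_fetch overloads return newval.t (the
+// value after).  For example, with *dest == 3:
+//
+//   atomic_fetch_oper(AddOper<int, const int>(), dest, 2);  // returns 3
+//   atomic_oper_fetch(AddOper<int, const int>(), dest, 2);  // returns 5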
+
+template <class Oper, typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_oper(
+    const Oper& op, volatile T* const dest,
+    std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8), const T> val) {
+#ifdef KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST
+  while (!Impl::lock_address_host_space((void*)dest))
+    ;
+  Kokkos::memory_fence();
+  T return_val = *dest;
+  *dest        = op.apply(return_val, val);
+  Kokkos::memory_fence();
+  Impl::unlock_address_host_space((void*)dest);
+  return return_val;
+#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_CUDA)
+  // This is a way to (hopefully) avoid deadlock within a warp
+  T return_val;
+  int done                 = 0;
+  unsigned int mask        = __activemask();
+  unsigned int active      = __ballot_sync(mask, 1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_cuda_space((void*)dest)) {
+        Kokkos::memory_fence();
+        return_val = *dest;
+        *dest      = op.apply(return_val, val);
+        Kokkos::memory_fence();
+        Impl::unlock_address_cuda_space((void*)dest);
+        done = 1;
+      }
+    }
+    done_active = __ballot_sync(mask, done);
+  }
+  return return_val;
+#elif defined(__HIP_DEVICE_COMPILE__)
+  T return_val             = *dest;
+  int done                 = 0;
+  unsigned int active      = __ballot(1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_hip_space((void*)dest)) {
+        return_val = *dest;
+        *dest      = op.apply(return_val, val);
+        Impl::unlock_address_hip_space((void*)dest);
+        done = 1;
+      }
+    }
+    done_active = __ballot(done);
+  }
+  return return_val;
+#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_SYCL)
+  // FIXME_SYCL
+  Kokkos::abort("Not implemented!");
+  (void)op;
+  (void)dest;
+  (void)val;
+  return 0;
+#endif
+}
+
+template <class Oper, typename T>
+KOKKOS_INLINE_FUNCTION T
+atomic_oper_fetch(const Oper& op, volatile T* const dest,
+                  std::enable_if_t<(sizeof(T) != 4) && (sizeof(T) != 8)
+#if defined(KOKKOS_ENABLE_ASM) && \
+    defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST)
+                                       && (sizeof(T) != 16)
+#endif
+                                       ,
+                                   const T>& val) {
+
+#ifdef KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_HOST
+  while (!Impl::lock_address_host_space((void*)dest))
+    ;
+  Kokkos::memory_fence();
+  T return_val = op.apply(*dest, val);
+  *dest        = return_val;
+  Kokkos::memory_fence();
+  Impl::unlock_address_host_space((void*)dest);
+  return return_val;
+#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_CUDA)
+  T return_val;
+  // This is a way to (hopefully) avoid deadlock within a warp
+  int done                 = 0;
+  unsigned int mask        = __activemask();
+  unsigned int active      = __ballot_sync(mask, 1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_cuda_space((void*)dest)) {
+        Kokkos::memory_fence();
+        return_val = op.apply(*dest, val);
+        *dest      = return_val;
+        Kokkos::memory_fence();
+        Impl::unlock_address_cuda_space((void*)dest);
+        done = 1;
+      }
+    }
+    done_active = __ballot_sync(mask, done);
+  }
+  return return_val;
+#elif defined(__HIP_DEVICE_COMPILE__)
+  T return_val;
+  int done                 = 0;
+  unsigned int active      = __ballot(1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_hip_space((void*)dest)) {
+        return_val = op.apply(*dest, val);
+        *dest      = return_val;
+        Impl::unlock_address_hip_space((void*)dest);
+        done = 1;
+      }
+    }
+    done_active = __ballot(done);
+  }
+  return return_val;
+#elif defined(KOKKOS_ACTIVE_EXECUTION_MEMORY_SPACE_SYCL)
+  // FIXME_SYCL
+  Kokkos::abort("Not implemented!");
+  (void)op;
+  (void)dest;
+  (void)val;
+  return 0;
+#endif
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+
+// Fetch_Oper atomics: return value before operation
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_max(volatile T* const dest, const T val) {
+  return Impl::atomic_fetch_oper(Impl::MaxOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_min(volatile T* const dest, const T val) {
+  return Impl::atomic_fetch_oper(Impl::MinOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_mul(volatile T* const dest, const T val) {
+  return Impl::atomic_fetch_oper(Impl::MulOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_div(volatile T* const dest, const T val) {
+  return Impl::atomic_fetch_oper(Impl::DivOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_mod(volatile T* const dest, const T val) {
+  return Impl::atomic_fetch_oper(Impl::ModOper<T, const T>(), dest, val);
+}
+
+#if !defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_and(volatile T* const dest, const T val) {
+  return Impl::atomic_fetch_oper(Impl::AndOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_or(volatile T* const dest, const T val) {
+  return Impl::atomic_fetch_oper(Impl::OrOper<T, const T>(), dest, val);
+}
+
+#endif
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_xor(volatile T* const dest, const T val) {
+  return Impl::atomic_fetch_oper(Impl::XorOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_lshift(volatile T* const dest,
+                                             const unsigned int val) {
+  return Impl::atomic_fetch_oper(Impl::LShiftOper<T, const unsigned int>(),
+                                 dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_rshift(volatile T* const dest,
+                                             const unsigned int val) {
+  return Impl::atomic_fetch_oper(Impl::RShiftOper<T, const unsigned int>(),
+                                 dest, val);
+}
+
+// Oper Fetch atomics: return value after operation
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_max_fetch(volatile T* const dest, const T val) {
+  return Impl::atomic_oper_fetch(Impl::MaxOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_min_fetch(volatile T* const dest, const T val) {
+  return Impl::atomic_oper_fetch(Impl::MinOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_mul_fetch(volatile T* const dest, const T val) {
+  return Impl::atomic_oper_fetch(Impl::MulOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_div_fetch(volatile T* const dest, const T val) {
+  return Impl::atomic_oper_fetch(Impl::DivOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_mod_fetch(volatile T* const dest, const T val) {
+  return Impl::atomic_oper_fetch(Impl::ModOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_and_fetch(volatile T* const dest, const T val) {
+  return Impl::atomic_oper_fetch(Impl::AndOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_or_fetch(volatile T* const dest, const T val) {
+  return Impl::atomic_oper_fetch(Impl::OrOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_xor_fetch(volatile T* const dest, const T val) {
+  return Impl::atomic_oper_fetch(Impl::XorOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_lshift_fetch(volatile T* const dest,
+                                             const unsigned int val) {
+  return Impl::atomic_oper_fetch(Impl::LShiftOper<T, const unsigned int>(),
+                                 dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_rshift_fetch(volatile T* const dest,
+                                             const unsigned int val) {
+  return Impl::atomic_oper_fetch(Impl::RShiftOper<T, const unsigned int>(),
+                                 dest, val);
+}
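+// A hedged usage sketch of the wrappers above: tracking a running
+// maximum across threads (peak and local_value are hypothetical):
+//
+//   double prev = Kokkos::atomic_fetch_max(&peak, local_value);  // old peak
+//   double now  = Kokkos::atomic_max_fetch(&peak, local_value);  // new peak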
+
+#ifdef _WIN32
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_add_fetch(volatile T* const dest, const T val) {
+  return Impl::atomic_oper_fetch(Impl::AddOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_sub_fetch(volatile T* const dest, const T val) {
+  return Impl::atomic_oper_fetch(Impl::SubOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_add(volatile T* const dest, const T val) {
+  return Impl::atomic_fetch_oper(Impl::AddOper<T, const T>(), dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_fetch_sub(volatile T* const dest, const T val) {
+  return Impl::atomic_fetch_oper(Impl::SubOper<T, const T>(), dest, val);
+}
+#endif
+
+}  // namespace Kokkos
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Generic_Secondary.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Generic_Secondary.hpp
new file mode 100644 (file)
index 0000000..7ab6358
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_GENERIC_SECONDARY_HPP)
+#define KOKKOS_ATOMIC_GENERIC_SECONDARY_HPP
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+
+#ifndef KOKKOS_ENABLE_SERIAL_ATOMICS
+template <typename T>
+KOKKOS_INLINE_FUNCTION T atomic_exchange(volatile T* const dest, const T val) {
+  T oldval = *dest;
+  T assume;
+  do {
+    assume = oldval;
+    oldval = atomic_compare_exchange(dest, assume, val);
+  } while (assume != oldval);
+
+  return oldval;
+}
+#endif
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION void atomic_add(volatile T* const dest, const T val) {
+  (void)atomic_fetch_add(dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION void atomic_sub(volatile T* const dest, const T val) {
+  (void)atomic_fetch_sub(dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION void atomic_mul(volatile T* const dest, const T val) {
+  (void)atomic_fetch_mul(dest, val);
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION void atomic_div(volatile T* const dest, const T val) {
+  (void)atomic_fetch_div(dest, val);
+}
+
+}  // namespace Kokkos
+#endif
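The atomic_exchange fallback above is the textbook compare-and-swap retry loop. The same pattern, sketched against std::atomic purely for illustration (exchange_via_cas is a hypothetical name):

    #include <atomic>

    // Illustration of the CAS retry loop used by the fallback atomic_exchange.
    template <typename T>
    T exchange_via_cas(std::atomic<T>& dest, T val) {
      T oldval = dest.load();
      T assume;
      do {
        assume = oldval;  // snapshot the value we expect to replace
        // on failure, compare_exchange_strong reloads 'oldval' with the
        // currently stored value, so the loop retries with a fresh snapshot
        dest.compare_exchange_strong(oldval, val);
      } while (assume != oldval);
      return oldval;  // the value that was actually replaced
    }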
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Increment.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Increment.hpp
new file mode 100644 (file)
index 0000000..65630aa
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+#include <xmmintrin.h>
+#endif
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_INCREMENT_HPP)
+#define KOKKOS_ATOMIC_INCREMENT_HPP
+
+namespace Kokkos {
+
+// Atomic increment
+template <>
+KOKKOS_INLINE_FUNCTION void atomic_increment<char>(volatile char* a) {
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
+    !defined(_WIN32) && !defined(__CUDA_ARCH__)
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)a, _MM_HINT_ET0);
+#endif
+  __asm__ __volatile__("lock incb %0"
+                       : /* no output registers */
+                       : "m"(a[0])
+                       : "memory");
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+  char* a_nv = const_cast<char*>(a);
+  ++(*a_nv);
+#else
+  Kokkos::atomic_fetch_add(a, char(1));
+#endif
+}
+
+template <>
+KOKKOS_INLINE_FUNCTION void atomic_increment<short>(volatile short* a) {
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
+    !defined(_WIN32) && !defined(__CUDA_ARCH__)
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)a, _MM_HINT_ET0);
+#endif
+  __asm__ __volatile__("lock incw %0"
+                       : /* no output registers */
+                       : "m"(a[0])
+                       : "memory");
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+  short* a_nv = const_cast<short*>(a);
+  ++(*a_nv);
+#else
+  Kokkos::atomic_fetch_add(a, short(1));
+#endif
+}
+
+#ifndef _WIN32
+template <>
+KOKKOS_INLINE_FUNCTION void atomic_increment<int>(volatile int* a) {
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
+    !defined(_WIN32) && !defined(__CUDA_ARCH__)
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)a, _MM_HINT_ET0);
+#endif
+  __asm__ __volatile__("lock incl %0"
+                       : /* no output registers */
+                       : "m"(a[0])
+                       : "memory");
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+  int* a_nv = const_cast<int*>(a);
+  ++(*a_nv);
+#else
+  Kokkos::atomic_fetch_add(a, int(1));
+#endif
+}
+#endif
+
+template <>
+KOKKOS_INLINE_FUNCTION void atomic_increment<long long int>(
+    volatile long long int* a) {
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64) && \
+    !defined(_WIN32) && !defined(__CUDA_ARCH__)
+#if defined(KOKKOS_ENABLE_RFO_PREFETCH)
+  _mm_prefetch((const char*)a, _MM_HINT_ET0);
+#endif
+  __asm__ __volatile__("lock incq %0"
+                       : /* no output registers */
+                       : "m"(a[0])
+                       : "memory");
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+  long long int* a_nv = const_cast<long long int*>(a);
+  ++(*a_nv);
+#else
+  using T = long long int;
+  Kokkos::atomic_fetch_add(a, T(1));
+#endif
+}
+
+template <typename T>
+KOKKOS_INLINE_FUNCTION void atomic_increment(volatile T* a) {
+#if defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+  T* a_nv = const_cast<T*>(a);
+  ++(*a_nv);
+#else
+  Kokkos::atomic_fetch_add(a, T(1));
+#endif
+}
+
+}  // End of namespace Kokkos
+#endif
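Each specialization above maps an integer width onto the matching x86-64 lock inc instruction when inline assembly is enabled, and otherwise falls back to atomic_fetch_add. A usage sketch, assuming a host-only build:

    #include <Kokkos_Core.hpp>

    int main() {
      char          c = 0;  Kokkos::atomic_increment(&c);  // lock incb
      short         s = 0;  Kokkos::atomic_increment(&s);  // lock incw
      int           i = 0;  Kokkos::atomic_increment(&i);  // lock incl
      long long int l = 0;  Kokkos::atomic_increment(&l);  // lock incq
      return (c == 1 && s == 1 && i == 1 && l == 1) ? 0 : 1;
    }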
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Load.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Load.hpp
new file mode 100644 (file)
index 0000000..f443732
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//              Copyright (2019) Sandia Corporation
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_KOKKOS_ATOMIC_LOAD_HPP
+#define KOKKOS_IMPL_KOKKOS_ATOMIC_LOAD_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP)
+
+#include <impl/Kokkos_Atomic_Memory_Order.hpp>
+#include <impl/Kokkos_Atomic_Generic.hpp>
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#include <Cuda/Kokkos_Cuda_Atomic_Intrinsics.hpp>
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+// Olivier's implementation helpfully binds to the same builtins as GNU, so
+// we make this code common across multiple options
+#if (defined(KOKKOS_ENABLE_GNU_ATOMICS) && !defined(__CUDA_ARCH__)) ||   \
+    (defined(KOKKOS_ENABLE_INTEL_ATOMICS) && !defined(__CUDA_ARCH__)) || \
+    defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
+
+#if defined(__CUDA_ARCH__) && defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
+#define KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH __inline__ __device__
+#else
+#define KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH inline
+#endif
+
+template <class T, class MemoryOrder>
+KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH T _atomic_load(
+    T* ptr, MemoryOrder,
+    std::enable_if_t<(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
+                      sizeof(T) == 8) &&
+                         std::is_same<typename MemoryOrder::memory_order,
+                                      std::remove_cv_t<MemoryOrder>>::value,
+                     void const**> = nullptr) {
+  return __atomic_load_n(ptr, MemoryOrder::gnu_constant);
+}
+
+template <class T, class MemoryOrder>
+KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH T _atomic_load(
+    T* ptr, MemoryOrder,
+    std::enable_if_t<!(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
+                       sizeof(T) == 8) &&
+                         std::is_default_constructible<T>::value &&
+                         std::is_same<typename MemoryOrder::memory_order,
+                                      std::remove_cv_t<MemoryOrder>>::value,
+                     void const**> = nullptr) {
+  T rv{};
+  __atomic_load(ptr, &rv, MemoryOrder::gnu_constant);
+  return rv;
+}
+
+#undef KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH
+
+#elif defined(__CUDA_ARCH__)
+
+// Not compiling for Volta or later, or Cuda ASM atomics were manually disabled
+
+template <class T>
+__device__ __inline__ T _relaxed_atomic_load_impl(
+    T* ptr, std::enable_if_t<(sizeof(T) == 1 || sizeof(T) == 2 ||
+                              sizeof(T) == 4 || sizeof(T) == 8),
+                             void const**> = nullptr) {
+  return *ptr;
+}
+
+template <class T>
+struct NoOpOper {
+  __device__ __inline__ static constexpr T apply(T const& t,
+                                                 T const&) noexcept {
+    return t;
+  }
+};
+
+template <class T>
+__device__ __inline__ T _relaxed_atomic_load_impl(
+    T* ptr, std::enable_if_t<!(sizeof(T) == 1 || sizeof(T) == 2 ||
+                               sizeof(T) == 4 || sizeof(T) == 8),
+                             void const**> = nullptr) {
+  T rv{};
+  // TODO remove a copy operation here?
+  return Kokkos::Impl::atomic_oper_fetch(NoOpOper<T>{}, ptr, rv);
+}
+
+template <class T>
+__device__ __inline__ T _atomic_load(T* ptr, memory_order_seq_cst_t) {
+  Kokkos::memory_fence();
+  T rv = Impl::_relaxed_atomic_load_impl(ptr);
+  Kokkos::memory_fence();
+  return rv;
+}
+
+template <class T>
+__device__ __inline__ T _atomic_load(T* ptr, memory_order_acquire_t) {
+  T rv = Impl::_relaxed_atomic_load_impl(ptr);
+  Kokkos::memory_fence();
+  return rv;
+}
+
+template <class T>
+__device__ __inline__ T _atomic_load(T* ptr, memory_order_relaxed_t) {
+  return _relaxed_atomic_load_impl(ptr);
+}
+
+#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
+
+template <class T, class MemoryOrder>
+inline T _atomic_load(T* ptr, MemoryOrder) {
+  // AFAICT, all OpenMP atomics are sequentially consistent, so memory order
+  // doesn't matter
+  T retval{};
+#pragma omp atomic read
+  { retval = *ptr; }
+  return retval;
+}
+
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+
+template <class T, class MemoryOrder>
+inline T _atomic_load(T* ptr, MemoryOrder) {
+  return *ptr;
+}
+
+#elif defined(KOKKOS_ENABLE_WINDOWS_ATOMICS)
+
+template <class T, class MemoryOrder>
+inline T _atomic_load(T* ptr, MemoryOrder) {
+  atomic_compare_exchange(ptr, 0, 0);
+  return *ptr;
+}
+
+#endif  // end of all atomic implementations
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION T atomic_load(T* ptr,
+                                          Impl::memory_order_seq_cst_t) {
+  return _atomic_load(ptr, Impl::memory_order_seq_cst);
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION T atomic_load(T* ptr,
+                                          Impl::memory_order_acquire_t) {
+  return _atomic_load(ptr, Impl::memory_order_acquire);
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION T atomic_load(T* ptr,
+                                          Impl::memory_order_relaxed_t) {
+  return _atomic_load(ptr, Impl::memory_order_relaxed);
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION T atomic_load(T* /*ptr*/,
+                                          Impl::memory_order_release_t) {
+  static_assert(
+      sizeof(T) == 0,  // just something that will always be false, but only on
+                       // instantiation
+      "atomic_load with memory order release doesn't make any sense!");
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION T atomic_load(T* /*ptr*/,
+                                          Impl::memory_order_acq_rel_t) {
+  static_assert(
+      sizeof(T) == 0,  // just something that will always be false, but only on
+                       // instantiation
+      "atomic_load with memory order acq_rel doesn't make any sense!");
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION T atomic_load(T* ptr) {
+  // relaxed by default!
+  return _atomic_load(ptr, Impl::memory_order_relaxed);
+}
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#include <Cuda/Kokkos_Cuda_Atomic_Intrinsics_Restore_Builtins.hpp>
+#endif
+
+#endif  // defined(KOKKOS_ATOMIC_HPP)
+#endif  // KOKKOS_IMPL_KOKKOS_ATOMIC_LOAD_HPP
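The Impl::atomic_load entry points default to relaxed and turn orders that make no sense for a load into compile errors through the sizeof(T) == 0 static_assert. A sketch against this internal interface, assuming a configured Kokkos build:

    #include <Kokkos_Atomic.hpp>

    int main() {
      int x = 42;
      int a = Kokkos::Impl::atomic_load(&x);  // relaxed by default
      int b = Kokkos::Impl::atomic_load(&x, Kokkos::Impl::memory_order_acquire);
      // Rejected at compile time by the static_assert above:
      // Kokkos::Impl::atomic_load(&x, Kokkos::Impl::memory_order_release);
      return a + b - 2 * x;  // 0
    }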
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Memory_Order.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Memory_Order.hpp
new file mode 100644 (file)
index 0000000..72a6dfa
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//              Copyright (2019) Sandia Corporation
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_ATOMIC_MEMORY_ORDER_HPP
+#define KOKKOS_KOKKOS_ATOMIC_MEMORY_ORDER_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <atomic>
+
+namespace Kokkos {
+namespace Impl {
+
+/** @file
+ * Provides strongly-typed analogs of the standard memory order enumerators.
+ * In addition to (very slightly) reducing the constant propagation burden on
+ * the compiler, this allows us to give compile-time errors for things that
+ * don't make sense, like atomic_load with memory order release.
+ */
+
+struct memory_order_seq_cst_t {
+  using memory_order = memory_order_seq_cst_t;
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS) ||   \
+    defined(KOKKOS_ENABLE_INTEL_ATOMICS) || \
+    defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
+  static constexpr auto gnu_constant = __ATOMIC_SEQ_CST;
+#endif
+  static constexpr auto std_constant = std::memory_order_seq_cst;
+};
+constexpr memory_order_seq_cst_t memory_order_seq_cst = {};
+
+struct memory_order_relaxed_t {
+  using memory_order = memory_order_relaxed_t;
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS) ||   \
+    defined(KOKKOS_ENABLE_INTEL_ATOMICS) || \
+    defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
+  static constexpr auto gnu_constant = __ATOMIC_RELAXED;
+#endif
+  static constexpr auto std_constant = std::memory_order_relaxed;
+};
+constexpr memory_order_relaxed_t memory_order_relaxed = {};
+
+struct memory_order_acquire_t {
+  using memory_order = memory_order_acquire_t;
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS) ||   \
+    defined(KOKKOS_ENABLE_INTEL_ATOMICS) || \
+    defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
+  static constexpr auto gnu_constant = __ATOMIC_ACQUIRE;
+#endif
+  static constexpr auto std_constant = std::memory_order_acquire;
+};
+constexpr memory_order_acquire_t memory_order_acquire = {};
+
+struct memory_order_release_t {
+  using memory_order = memory_order_release_t;
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS) ||   \
+    defined(KOKKOS_ENABLE_INTEL_ATOMICS) || \
+    defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
+  static constexpr auto gnu_constant = __ATOMIC_RELEASE;
+#endif
+  static constexpr auto std_constant = std::memory_order_release;
+};
+constexpr memory_order_release_t memory_order_release = {};
+
+struct memory_order_acq_rel_t {
+  using memory_order = memory_order_acq_rel_t;
+#if defined(KOKKOS_ENABLE_GNU_ATOMICS) ||   \
+    defined(KOKKOS_ENABLE_INTEL_ATOMICS) || \
+    defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
+  static constexpr auto gnu_constant = __ATOMIC_ACQ_REL;
+#endif
+  static constexpr auto std_constant = std::memory_order_acq_rel;
+};
+constexpr memory_order_acq_rel_t memory_order_acq_rel = {};
+
+// Intentionally omit consume (for now)
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_ATOMIC_MEMORY_ORDER_HPP
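The idiom here: each memory order is a distinct type carrying the matching GNU and std constants, so overload resolution selects an implementation at compile time with no runtime branching. A stripped-down analog, with hypothetical names, for illustration:

    #include <atomic>

    struct relaxed_t { static constexpr auto std_constant = std::memory_order_relaxed; };
    struct acquire_t { static constexpr auto std_constant = std::memory_order_acquire; };

    template <class T, class Order>
    T load_with(const std::atomic<T>& a, Order) {
      return a.load(Order::std_constant);  // order resolved at compile time
    }

    // usage: int v = load_with(counter, acquire_t{});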
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_MinMax.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_MinMax.hpp
new file mode 100644 (file)
index 0000000..7338a5c
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_ATOMIC_MINMAX_HPP)
+#define KOKKOS_ATOMIC_MINMAX_HPP
+
+namespace Kokkos {
+
+//----------------------------------------------------------------------------
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#if defined(__CUDA_ARCH__) || defined(KOKKOS_IMPL_CUDA_CLANG_WORKAROUND)
+
+// Support for int, unsigned int, unsigned long long int, and float
+
+// Atomic_fetch_{min,max}
+
+#ifdef KOKKOS_IMPL_CUDA_CLANG_WORKAROUND
+
+// Host implementations for CLANG compiler
+
+inline __host__ int atomic_fetch_min(volatile int* const dest, const int val) {
+  return Impl::atomic_fetch_oper(Impl::MinOper<const int, const int>(), dest,
+                                 val);
+}
+
+inline __host__ unsigned int atomic_fetch_min(volatile unsigned int* const dest,
+                                              const unsigned int val) {
+  return Impl::atomic_fetch_oper(
+      Impl::MinOper<const unsigned int, const unsigned int>(), dest, val);
+}
+
+inline __host__ unsigned long long int atomic_fetch_min(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return Impl::atomic_fetch_oper(Impl::MinOper<const unsigned long long int,
+                                               const unsigned long long int>(),
+                                 dest, val);
+}
+
+inline __host__ int atomic_fetch_max(volatile int* const dest, const int val) {
+  return Impl::atomic_fetch_oper(Impl::MaxOper<const int, const int>(), dest,
+                                 val);
+}
+
+inline __host__ unsigned int atomic_fetch_max(volatile unsigned int* const dest,
+                                              const unsigned int val) {
+  return Impl::atomic_fetch_oper(
+      Impl::MaxOper<const unsigned int, const unsigned int>(), dest, val);
+}
+
+inline __host__ unsigned long long int atomic_fetch_max(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return Impl::atomic_fetch_oper(Impl::MaxOper<const unsigned long long int,
+                                               const unsigned long long int>(),
+                                 dest, val);
+}
+
+#endif
+
+#if (350 > __CUDA_ARCH__)
+
+// Fallback for atomic{Min,Max} for Kepler
+
+inline __device__ int atomic_fetch_min(volatile int* const dest,
+                                       const int val) {
+  return Impl::atomic_fetch_oper(Impl::MinOper<const int, const int>(), dest,
+                                 val);
+}
+
+inline __device__ unsigned int atomic_fetch_min(
+    volatile unsigned int* const dest, const unsigned int val) {
+  return Impl::atomic_fetch_oper(
+      Impl::MinOper<const unsigned int, const unsigned int>(), dest, val);
+}
+
+inline __device__ unsigned long long int atomic_fetch_min(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return Impl::atomic_fetch_oper(Impl::MinOper<const unsigned long long int,
+                                               const unsigned long long int>(),
+                                 dest, val);
+}
+
+inline __device__ int atomic_fetch_max(volatile int* const dest,
+                                       const int val) {
+  return Impl::atomic_fetch_oper(Impl::MaxOper<const int, const int>(), dest,
+                                 val);
+}
+
+inline __device__ unsigned int atomic_fetch_max(
+    volatile unsigned int* const dest, const unsigned int val) {
+  return Impl::atomic_fetch_oper(
+      Impl::MaxOper<const unsigned int, const unsigned int>(), dest, val);
+}
+
+inline __device__ unsigned long long int atomic_fetch_max(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return Impl::atomic_fetch_oper(Impl::MaxOper<const unsigned long long int,
+                                               const unsigned long long int>(),
+                                 dest, val);
+}
+
+#else  // Supported by devices of compute capability 3.5 and higher
+
+inline __device__ int atomic_fetch_min(volatile int* const dest,
+                                       const int val) {
+  return atomicMin((int*)dest, val);
+}
+
+inline __device__ unsigned int atomic_fetch_min(
+    volatile unsigned int* const dest, const unsigned int val) {
+  return atomicMin((unsigned int*)dest, val);
+}
+
+inline __device__ unsigned long long int atomic_fetch_min(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return atomicMin((unsigned long long int*)dest, val);
+}
+
+inline __device__ int atomic_fetch_max(volatile int* const dest,
+                                       const int val) {
+  return atomicMax((int*)dest, val);
+}
+
+inline __device__ unsigned int atomic_fetch_max(
+    volatile unsigned int* const dest, const unsigned int val) {
+  return atomicMax((unsigned int*)dest, val);
+}
+
+inline __device__ unsigned long long int atomic_fetch_max(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return atomicMax((unsigned long long int*)dest, val);
+}
+
+#endif
+
+// Atomic_{min,max}_fetch
+
+#ifdef KOKKOS_IMPL_CUDA_CLANG_WORKAROUND
+
+// Host implementations for CLANG compiler
+
+inline __host__ int atomic_min_fetch(volatile int* const dest, const int val) {
+  return Impl::atomic_oper_fetch(Impl::MinOper<const int, const int>(), dest,
+                                 val);
+}
+
+inline __host__ unsigned int atomic_min_fetch(volatile unsigned int* const dest,
+                                              const unsigned int val) {
+  return Impl::atomic_oper_fetch(
+      Impl::MinOper<const unsigned int, const unsigned int>(), dest, val);
+}
+
+inline __host__ unsigned long long int atomic_min_fetch(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return Impl::atomic_oper_fetch(Impl::MinOper<const unsigned long long int,
+                                               const unsigned long long int>(),
+                                 dest, val);
+}
+
+inline __host__ int atomic_max_fetch(volatile int* const dest, const int val) {
+  return Impl::atomic_oper_fetch(Impl::MaxOper<const int, const int>(), dest,
+                                 val);
+}
+
+inline __host__ unsigned int atomic_max_fetch(volatile unsigned int* const dest,
+                                              const unsigned int val) {
+  return Impl::atomic_oper_fetch(
+      Impl::MaxOper<const unsigned int, const unsigned int>(), dest, val);
+}
+
+inline __host__ unsigned long long int atomic_max_fetch(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return Impl::atomic_oper_fetch(Impl::MaxOper<const unsigned long long int,
+                                               const unsigned long long int>(),
+                                 dest, val);
+}
+#endif
+
+#if (350 > __CUDA_ARCH__)
+
+// Fallback for atomic{Min,Max} for Kepler
+
+inline __device__ int atomic_min_fetch(volatile int* const dest,
+                                       const int val) {
+  return Impl::atomic_oper_fetch(Impl::MinOper<const int, const int>(), dest,
+                                 val);
+}
+
+inline __device__ unsigned int atomic_min_fetch(
+    volatile unsigned int* const dest, const unsigned int val) {
+  return Impl::atomic_oper_fetch(
+      Impl::MinOper<const unsigned int, const unsigned int>(), dest, val);
+}
+
+inline __device__ unsigned long long int atomic_min_fetch(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return Impl::atomic_oper_fetch(Impl::MinOper<const unsigned long long int,
+                                               const unsigned long long int>(),
+                                 dest, val);
+}
+
+inline __device__ int atomic_max_fetch(volatile int* const dest,
+                                       const int val) {
+  return Impl::atomic_oper_fetch(Impl::MaxOper<const int, const int>(), dest,
+                                 val);
+}
+
+inline __device__ unsigned int atomic_max_fetch(
+    volatile unsigned int* const dest, const unsigned int val) {
+  return Impl::atomic_oper_fetch(
+      Impl::MaxOper<const unsigned int, const unsigned int>(), dest, val);
+}
+
+inline __device__ unsigned long long int atomic_max_fetch(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  return Impl::atomic_oper_fetch(Impl::MaxOper<const unsigned long long int,
+                                               const unsigned long long int>(),
+                                 dest, val);
+}
+
+#else  // Supported by devices of compute capability 3.5 and higher
+
+inline __device__ int atomic_min_fetch(volatile int* const dest,
+                                       const int val) {
+  const int old = atomicMin((int*)dest, val);
+  return old < val ? old : val;
+}
+
+inline __device__ unsigned int atomic_min_fetch(
+    volatile unsigned int* const dest, const unsigned int val) {
+  const unsigned int old = atomicMin((unsigned int*)dest, val);
+  return old < val ? old : val;
+}
+
+inline __device__ unsigned long long int atomic_min_fetch(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  const unsigned long long old = atomicMin((unsigned long long*)dest, val);
+  return old < val ? old : val;
+}
+
+inline __device__ int atomic_max_fetch(volatile int* const dest,
+                                       const int val) {
+  const int old = atomicMax((int*)dest, val);
+  return old >= val ? old : val;
+}
+
+inline __device__ unsigned int atomic_max_fetch(
+    volatile unsigned int* const dest, const unsigned int val) {
+  const unsigned int old = atomicMax((unsigned int*)dest, val);
+  return old >= val ? old : val;
+}
+
+inline __device__ unsigned long long int atomic_max_fetch(
+    volatile unsigned long long int* const dest,
+    const unsigned long long int val) {
+  const unsigned long long old = atomicMax((unsigned long long*)dest, val);
+  return old >= val ? old : val;
+}
+
+#endif
+
+#endif
+#endif
+}  // namespace Kokkos
+
+#endif
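Note how the compute-capability-3.5+ *_fetch variants avoid a second read: atomicMin/atomicMax return the pre-update value, and the post-update value follows from new = min(old, val) (resp. max). The same identity, sketched with a CAS-based fetch_min over std::atomic (illustration only; std::atomic gains a native fetch_min only in C++26):

    #include <atomic>

    // Illustrative fetch_min: returns the pre-update value, like CUDA's atomicMin.
    int fetch_min(std::atomic<int>& dest, int val) {
      int old = dest.load();
      while (old > val && !dest.compare_exchange_weak(old, val)) {
        // on failure, 'old' is refreshed with the currently stored value
      }
      return old;
    }

    // min_fetch derives the post-update value without re-reading dest.
    int min_fetch(std::atomic<int>& dest, int val) {
      const int old = fetch_min(dest, val);
      return old < val ? old : val;  // new value == min(old, val)
    }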
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Store.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Store.hpp
new file mode 100644 (file)
index 0000000..ffe018b
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//              Copyright (2019) Sandia Corporation
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_KOKKOS_ATOMIC_STORE_HPP
+#define KOKKOS_IMPL_KOKKOS_ATOMIC_STORE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP)
+
+#include <impl/Kokkos_Atomic_Memory_Order.hpp>
+#include <impl/Kokkos_Atomic_Generic.hpp>
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#include <Cuda/Kokkos_Cuda_Atomic_Intrinsics.hpp>
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+// Olivier's implementation helpfully binds to the same builtins as GNU, so
+// we make this code common across multiple options
+#if (defined(KOKKOS_ENABLE_GNU_ATOMICS) && !defined(__CUDA_ARCH__)) ||   \
+    (defined(KOKKOS_ENABLE_INTEL_ATOMICS) && !defined(__CUDA_ARCH__)) || \
+    defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
+
+#if defined(__CUDA_ARCH__) && defined(KOKKOS_ENABLE_CUDA_ASM_ATOMICS)
+#define KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH __inline__ __device__
+#else
+#define KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH inline
+#endif
+
+template <class T, class MemoryOrder>
+KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH void _atomic_store(
+    T* ptr, T val, MemoryOrder,
+    std::enable_if_t<(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
+                      sizeof(T) == 8) &&
+                         std::is_same<typename MemoryOrder::memory_order,
+                                      std::remove_cv_t<MemoryOrder>>::value,
+                     void const**> = nullptr) {
+  __atomic_store_n(ptr, val, MemoryOrder::gnu_constant);
+}
+
+template <class T, class MemoryOrder>
+KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH void _atomic_store(
+    T* ptr, T val, MemoryOrder,
+    std::enable_if_t<!(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
+                       sizeof(T) == 8) &&
+                         std::is_default_constructible<T>::value &&
+                         std::is_same<typename MemoryOrder::memory_order,
+                                      std::remove_cv_t<MemoryOrder>>::value,
+                     void const**> = nullptr) {
+  __atomic_store(ptr, &val, MemoryOrder::gnu_constant);
+}
+
+#undef KOKKOS_INTERNAL_INLINE_DEVICE_IF_CUDA_ARCH
+
+#elif defined(__CUDA_ARCH__)
+
+// Not compiling for Volta or later, or Cuda ASM atomics were manually disabled
+
+template <class T>
+__device__ __inline__ void _relaxed_atomic_store_impl(
+    T* ptr, T val,
+    std::enable_if_t<(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
+                      sizeof(T) == 8),
+                     void const**> = nullptr) {
+  *ptr = val;
+}
+
+template <class T>
+struct StoreOper {
+  __device__ __inline__ static constexpr T apply(T const&,
+                                                 T const& val) noexcept {
+    return val;
+  }
+};
+
+template <class T>
+__device__ __inline__ void _relaxed_atomic_store_impl(
+    T* ptr, T val,
+    std::enable_if_t<!(sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 ||
+                       sizeof(T) == 8),
+                     void const**> = nullptr) {
+  Kokkos::Impl::atomic_oper_fetch(StoreOper<T>{}, ptr, (T &&) val);
+}
+
+template <class T>
+__device__ __inline__ void _atomic_store(T* ptr, T val,
+                                         memory_order_seq_cst_t) {
+  Kokkos::memory_fence();
+  Impl::_relaxed_atomic_store_impl(ptr, val);
+  Kokkos::memory_fence();
+}
+
+template <class T>
+__device__ __inline__ void _atomic_store(T* ptr, T val,
+                                         memory_order_release_t) {
+  Kokkos::memory_fence();
+  _relaxed_atomic_store_impl(ptr, val);
+}
+
+template <class T>
+__device__ __inline__ void _atomic_store(T* ptr, T val,
+                                         memory_order_relaxed_t) {
+  _relaxed_atomic_store_impl(ptr, val);
+}
+
+#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
+
+template <class T, class MemoryOrder>
+inline void _atomic_store(T* ptr, T val, MemoryOrder) {
+  // AFAICT, all OpenMP atomics are sequentially consistent, so memory order
+  // doesn't matter
+#pragma omp atomic write
+  { *ptr = val; }
+}
+
+#elif defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+
+template <class T, class MemoryOrder>
+inline void _atomic_store(T* ptr, T val, MemoryOrder) {
+  *ptr = val;
+}
+
+#elif defined(KOKKOS_ENABLE_WINDOWS_ATOMICS)
+
+template <class T, class MemoryOrder>
+inline void _atomic_store(T* ptr, T val, MemoryOrder) {
+  atomic_exchange(ptr, val);
+}
+
+#endif  // end of all atomic implementations
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION void atomic_store(T* ptr, T val,
+                                              Impl::memory_order_seq_cst_t) {
+  _atomic_store(ptr, val, Impl::memory_order_seq_cst);
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION void atomic_store(T* ptr, T val,
+                                              Impl::memory_order_release_t) {
+  _atomic_store(ptr, val, Impl::memory_order_release);
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION void atomic_store(T* ptr, T val,
+                                              Impl::memory_order_relaxed_t) {
+  _atomic_store(ptr, val, Impl::memory_order_relaxed);
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION void atomic_store(T* /*ptr*/, T /*val*/,
+                                              Impl::memory_order_acquire_t) {
+  static_assert(
+      sizeof(T) == 0,  // just something that will always be false, but only on
+                       // instantiation
+      "atomic_store with memory order acquire doesn't make any sense!");
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION void atomic_store(T* /*ptr*/, T /*val*/,
+                                              Impl::memory_order_acq_rel_t) {
+  static_assert(
+      sizeof(T) == 0,  // just something that will always be false, but only on
+                       // instantiation
+      "atomic_store with memory order acq_rel doesn't make any sense!");
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION void atomic_store(T* ptr, T val) {
+  // relaxed by default!
+  _atomic_store(ptr, val, Impl::memory_order_relaxed);
+}
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#if defined(KOKKOS_ENABLE_CUDA)
+#include <Cuda/Kokkos_Cuda_Atomic_Intrinsics_Restore_Builtins.hpp>
+#endif
+
+#endif  // defined(KOKKOS_ATOMIC_HPP)
+#endif  // KOKKOS_IMPL_KOKKOS_ATOMIC_STORE_HPP
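Together with Kokkos_Atomic_Load.hpp this supports the usual release/acquire publication pattern; a sketch using the internal entry points, assuming a configured Kokkos build and two host threads running producer and consumer:

    #include <Kokkos_Atomic.hpp>

    int data = 0;
    int flag = 0;

    void producer() {
      data = 42;  // ordinary write, published by the release store below
      Kokkos::Impl::atomic_store(&flag, 1, Kokkos::Impl::memory_order_release);
    }

    void consumer() {
      while (Kokkos::Impl::atomic_load(&flag,
                                       Kokkos::Impl::memory_order_acquire) == 0) {
      }
      // the acquire load synchronizes with the release store: data reads 42
    }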
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_View.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_View.hpp
new file mode 100644 (file)
index 0000000..f763f8c
--- /dev/null
@@ -0,0 +1,384 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+#ifndef KOKKOS_ATOMIC_VIEW_HPP
+#define KOKKOS_ATOMIC_VIEW_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Atomic.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// The following tag is used to prevent an implicit constructor call when
+// trying to assign a literal 0 (e.g. "= 0;").
+struct AtomicViewConstTag {};
+
+template <class ViewTraits>
+class AtomicDataElement {
+ public:
+  using value_type           = typename ViewTraits::value_type;
+  using const_value_type     = typename ViewTraits::const_value_type;
+  using non_const_value_type = typename ViewTraits::non_const_value_type;
+  volatile value_type* const ptr;
+
+  KOKKOS_INLINE_FUNCTION
+  AtomicDataElement(value_type* ptr_, AtomicViewConstTag) : ptr(ptr_) {}
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator=(const_value_type& val) const {
+    *ptr = val;
+    return val;
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator=(volatile const_value_type& val) const {
+    *ptr = val;
+    return val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void inc() const { Kokkos::atomic_increment(ptr); }
+
+  KOKKOS_INLINE_FUNCTION
+  void dec() const { Kokkos::atomic_decrement(ptr); }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator++() const {
+    const_value_type tmp =
+        Kokkos::atomic_fetch_add(ptr, non_const_value_type(1));
+    return tmp + 1;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator--() const {
+    const_value_type tmp =
+        Kokkos::atomic_fetch_sub(ptr, non_const_value_type(1));
+    return tmp - 1;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator++(int) const {
+    return Kokkos::atomic_fetch_add(ptr, non_const_value_type(1));
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator--(int) const {
+    return Kokkos::atomic_fetch_sub(ptr, non_const_value_type(1));
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator+=(const_value_type& val) const {
+    const_value_type tmp = Kokkos::atomic_fetch_add(ptr, val);
+    return tmp + val;
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator+=(volatile const_value_type& val) const {
+    const_value_type tmp = Kokkos::atomic_fetch_add(ptr, val);
+    return tmp + val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator-=(const_value_type& val) const {
+    const_value_type tmp = Kokkos::atomic_fetch_sub(ptr, val);
+    return tmp - val;
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator-=(volatile const_value_type& val) const {
+    const_value_type tmp = Kokkos::atomic_fetch_sub(ptr, val);
+    return tmp - val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator*=(const_value_type& val) const {
+    return Kokkos::atomic_mul_fetch(ptr, val);
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator*=(volatile const_value_type& val) const {
+    return Kokkos::atomic_mul_fetch(ptr, val);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator/=(const_value_type& val) const {
+    return Kokkos::atomic_div_fetch(ptr, val);
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator/=(volatile const_value_type& val) const {
+    return Kokkos::atomic_div_fetch(ptr, val);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator%=(const_value_type& val) const {
+    return Kokkos::atomic_mod_fetch(ptr, val);
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator%=(volatile const_value_type& val) const {
+    return Kokkos::atomic_mod_fetch(ptr, val);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator&=(const_value_type& val) const {
+    return Kokkos::atomic_and_fetch(ptr, val);
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator&=(volatile const_value_type& val) const {
+    return Kokkos::atomic_and_fetch(ptr, val);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator^=(const_value_type& val) const {
+    return Kokkos::atomic_xor_fetch(ptr, val);
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator^=(volatile const_value_type& val) const {
+    return Kokkos::atomic_xor_fetch(ptr, val);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator|=(const_value_type& val) const {
+    return Kokkos::atomic_or_fetch(ptr, val);
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator|=(volatile const_value_type& val) const {
+    return Kokkos::atomic_or_fetch(ptr, val);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator<<=(const_value_type& val) const {
+    return Kokkos::atomic_lshift_fetch(ptr, val);
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator<<=(volatile const_value_type& val) const {
+    return Kokkos::atomic_lshift_fetch(ptr, val);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator>>=(const_value_type& val) const {
+    return Kokkos::atomic_rshift_fetch(ptr, val);
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator>>=(volatile const_value_type& val) const {
+    return Kokkos::atomic_rshift_fetch(ptr, val);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator+(const_value_type& val) const { return *ptr + val; }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator+(volatile const_value_type& val) const {
+    return *ptr + val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator-(const_value_type& val) const { return *ptr - val; }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator-(volatile const_value_type& val) const {
+    return *ptr - val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator*(const_value_type& val) const { return *ptr * val; }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator*(volatile const_value_type& val) const {
+    return *ptr * val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator/(const_value_type& val) const { return *ptr / val; }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator/(volatile const_value_type& val) const {
+    return *ptr / val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator%(const_value_type& val) const { return *ptr % val; }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator%(volatile const_value_type& val) const {
+    return *ptr % val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator!() const { return !*ptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator&&(const_value_type& val) const {
+    return *ptr && val;
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator&&(volatile const_value_type& val) const {
+    return *ptr && val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator||(const_value_type& val) const {
+    return *ptr || val;
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator||(volatile const_value_type& val) const {
+    return *ptr || val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator&(const_value_type& val) const { return *ptr & val; }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator&(volatile const_value_type& val) const {
+    return *ptr & val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator|(const_value_type& val) const { return *ptr | val; }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator|(volatile const_value_type& val) const {
+    return *ptr | val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator^(const_value_type& val) const { return *ptr ^ val; }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator^(volatile const_value_type& val) const {
+    return *ptr ^ val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator~() const { return ~*ptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator<<(const unsigned int& val) const {
+    return *ptr << val;
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator<<(volatile const unsigned int& val) const {
+    return *ptr << val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator>>(const unsigned int& val) const {
+    return *ptr >> val;
+  }
+  KOKKOS_INLINE_FUNCTION
+  const_value_type operator>>(volatile const unsigned int& val) const {
+    return *ptr >> val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool operator==(const AtomicDataElement& val) const { return *ptr == val; }
+  KOKKOS_INLINE_FUNCTION
+  bool operator==(volatile const AtomicDataElement& val) const {
+    return *ptr == val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool operator!=(const AtomicDataElement& val) const { return *ptr != val; }
+  KOKKOS_INLINE_FUNCTION
+  bool operator!=(volatile const AtomicDataElement& val) const {
+    return *ptr != val;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool operator>=(const_value_type& val) const { return *ptr >= val; }
+  KOKKOS_INLINE_FUNCTION
+  bool operator>=(volatile const_value_type& val) const { return *ptr >= val; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool operator<=(const_value_type& val) const { return *ptr <= val; }
+  KOKKOS_INLINE_FUNCTION
+  bool operator<=(volatile const_value_type& val) const { return *ptr <= val; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool operator<(const_value_type& val) const { return *ptr < val; }
+  KOKKOS_INLINE_FUNCTION
+  bool operator<(volatile const_value_type& val) const { return *ptr < val; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool operator>(const_value_type& val) const { return *ptr > val; }
+  KOKKOS_INLINE_FUNCTION
+  bool operator>(volatile const_value_type& val) const { return *ptr > val; }
+
+  KOKKOS_INLINE_FUNCTION
+  operator const_value_type() const {
+    // return Kokkos::atomic_load(ptr);
+    return *ptr;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  operator non_const_value_type() volatile const {
+    return Kokkos::Impl::atomic_load(ptr);
+  }
+};
+
+template <class ViewTraits>
+class AtomicViewDataHandle {
+ public:
+  typename ViewTraits::value_type* ptr;
+
+  KOKKOS_INLINE_FUNCTION
+  AtomicViewDataHandle() : ptr(nullptr) {}
+
+  KOKKOS_INLINE_FUNCTION
+  AtomicViewDataHandle(typename ViewTraits::value_type* ptr_) : ptr(ptr_) {}
+
+  template <class iType>
+  KOKKOS_INLINE_FUNCTION AtomicDataElement<ViewTraits> operator[](
+      const iType& i) const {
+    return AtomicDataElement<ViewTraits>(ptr + i, AtomicViewConstTag());
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  operator typename ViewTraits::value_type*() const { return ptr; }
+};
+
+template <unsigned Size>
+struct Kokkos_Atomic_is_only_allowed_with_32bit_and_64bit_scalars;
+
+template <>
+struct Kokkos_Atomic_is_only_allowed_with_32bit_and_64bit_scalars<4> {
+  using type = int;
+};
+
+template <>
+struct Kokkos_Atomic_is_only_allowed_with_32bit_and_64bit_scalars<8> {
+  using type = int64_t;
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
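AtomicDataElement and AtomicViewDataHandle are what back a View declared with the Atomic memory trait: each element access returns a proxy whose operators route through the atomic free functions. A sketch, assuming a configured Kokkos build:

    #include <Kokkos_Core.hpp>

    int main(int argc, char* argv[]) {
      Kokkos::initialize(argc, argv);
      {
        Kokkos::View<int*> hist("hist", 16);
        // Accesses through this alias yield AtomicDataElement proxies.
        Kokkos::View<int*, Kokkos::MemoryTraits<Kokkos::Atomic>> ahist = hist;
        Kokkos::parallel_for(
            "fill", 1024,
            KOKKOS_LAMBDA(const int i) { ahist(i % 16) += 1; });  // atomic RMW
        Kokkos::fence();
      }
      Kokkos::finalize();
      return 0;
    }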
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Windows.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Atomic_Windows.hpp
new file mode 100644 (file)
index 0000000..c5207b5
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+#ifndef KOKKOS_ATOMIC_WINDOWS_HPP
+#define KOKKOS_ATOMIC_WINDOWS_HPP
+
+#ifdef _WIN32
+
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#include <winsock2.h>
+#include <windows.h>
+
+namespace Kokkos {
+namespace Impl {
+#ifdef _MSC_VER
+_declspec(align(16))
+#endif
+    struct cas128_t {
+  LONGLONG lower;
+  LONGLONG upper;
+  KOKKOS_INLINE_FUNCTION
+  bool operator!=(const cas128_t& a) const {
+    return (lower != a.lower) || upper != a.upper;
+  }
+}
+#if defined(__GNUC__) || defined(__clang__)
+__attribute__((aligned(16)))
+#endif
+;
+}  // namespace Impl
+
+#if !defined(__CUDA_ARCH__) || defined(__clang__)
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) == sizeof(CHAR), const T&> val) {
+  union U {
+    CHAR i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U(){};
+  } tmp;
+
+  tmp.i = _InterlockedCompareExchange8((CHAR*)dest, *((CHAR*)&val),
+                                       *((CHAR*)&compare));
+  return tmp.t;
+}
+
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) == sizeof(SHORT), const T&> val) {
+  union U {
+    SHORT i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U(){};
+  } tmp;
+
+  tmp.i = _InterlockedCompareExchange16((SHORT*)dest, *((SHORT*)&val),
+                                        *((SHORT*)&compare));
+  return tmp.t;
+}
+
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) == sizeof(LONG), const T&> val) {
+  union U {
+    LONG i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } tmp;
+
+  tmp.i = _InterlockedCompareExchange((LONG*)dest, *((LONG*)&val),
+                                      *((LONG*)&compare));
+  return tmp.t;
+}
+
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) == sizeof(LONGLONG), const T&> val) {
+  union U {
+    LONGLONG i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U() {}
+  } tmp;
+
+  tmp.i = _InterlockedCompareExchange64((LONGLONG*)dest, *((LONGLONG*)&val),
+                                        *((LONGLONG*)&compare));
+  return tmp.t;
+}
+
+template <typename T>
+inline T atomic_compare_exchange(
+    volatile T* const dest, const T& compare,
+    std::enable_if_t<sizeof(T) == sizeof(Impl::cas128_t), const T&> val) {
+  T compare_and_result(compare);
+  union U {
+    Impl::cas128_t i;
+    T t;
+    KOKKOS_INLINE_FUNCTION U(){};
+  } newval;
+  newval.t = val;
+  _InterlockedCompareExchange128((LONGLONG*)dest, newval.i.upper,
+                                 newval.i.lower,
+                                 ((LONGLONG*)&compare_and_result));
+  return compare_and_result;
+}
+#endif
+
+}  // namespace Kokkos
+#endif
+#endif
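The wrappers above select an _InterlockedCompareExchange* intrinsic purely by sizeof(T), punning the value through a union; callers then build retry loops on top of the returned old value. A reduced, portable sketch of such a CAS loop, written against std::atomic rather than the Win32 intrinsics (illustration only, not the bundled implementation):

#include <atomic>

// Typical consumer of an atomic compare-exchange primitive: a lock-free
// "store max". compare_exchange_weak reloads `old` on failure, so the
// loop retries until `val` is installed or a value >= val is observed.
template <typename T>
T atomic_fetch_max(std::atomic<T>* dest, T val) {
  T old = dest->load(std::memory_order_relaxed);
  while (old < val &&
         !dest->compare_exchange_weak(old, val, std::memory_order_seq_cst,
                                      std::memory_order_relaxed)) {
  }
  return old;
}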
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_BitOps.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_BitOps.hpp
new file mode 100644 (file)
index 0000000..a41d19a
--- /dev/null
@@ -0,0 +1,272 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_BITOPS_HPP
+#define KOKKOS_BITOPS_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <cstdint>
+#include <climits>
+
+#ifdef KOKKOS_COMPILER_INTEL
+#include <immintrin.h>
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+KOKKOS_FORCEINLINE_FUNCTION
+int int_log2_fallback(unsigned i) {
+  constexpr int shift = sizeof(unsigned) * CHAR_BIT - 1;
+
+  int offset = 0;
+  if (i) {
+    for (offset = shift; (i & (1 << offset)) == 0; --offset)
+      ;
+  }
+  return offset;
+}
+
+KOKKOS_IMPL_DEVICE_FUNCTION
+inline int int_log2_device(unsigned i) {
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
+  constexpr int shift = sizeof(unsigned) * CHAR_BIT - 1;
+  return shift - __clz(i);
+#elif defined(KOKKOS_COMPILER_INTEL)
+  return _bit_scan_reverse(i);
+#else
+  return int_log2_fallback(i);
+#endif
+}
+
+KOKKOS_IMPL_HOST_FUNCTION
+inline int int_log2_host(unsigned i) {
+// duplicating shift to avoid unused warning in else branch
+#if defined(KOKKOS_COMPILER_INTEL)
+  constexpr int shift = sizeof(unsigned) * CHAR_BIT - 1;
+  (void)shift;
+  return _bit_scan_reverse(i);
+#elif defined(KOKKOS_COMPILER_CRAYC)
+  constexpr int shift = sizeof(unsigned) * CHAR_BIT - 1;
+  return i ? shift - _leadz32(i) : 0;
+#elif defined(__GNUC__) || defined(__GNUG__)
+  constexpr int shift = sizeof(unsigned) * CHAR_BIT - 1;
+  return shift - __builtin_clz(i);
+#else
+  return int_log2_fallback(i);
+#endif
+}
+
+#if defined(__EDG__) && !defined(KOKKOS_COMPILER_INTEL)
+#pragma push
+#pragma diag_suppress implicit_return_from_non_void_function
+#endif
+KOKKOS_FORCEINLINE_FUNCTION
+int int_log2(unsigned i) {
+  KOKKOS_IF_ON_DEVICE((return int_log2_device(i);))
+  KOKKOS_IF_ON_HOST((return int_log2_host(i);))
+}
+#if defined(__EDG__) && !defined(KOKKOS_COMPILER_INTEL)
+#pragma pop
+#endif
+
+/**\brief  Find first zero bit.
+ *
+ *  If none, return -1.
+ */
+KOKKOS_FORCEINLINE_FUNCTION
+int bit_first_zero_fallback(unsigned i) noexcept {
+  constexpr unsigned full = ~0u;
+
+  int offset = -1;
+  if (full != i) {
+    for (offset = 0; i & (1 << offset); ++offset)
+      ;
+  }
+  return offset;
+}
+
+KOKKOS_IMPL_DEVICE_FUNCTION
+inline int bit_first_zero_device(unsigned i) noexcept {
+  constexpr unsigned full = ~0u;
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
+  return full != i ? __ffs(~i) - 1 : -1;
+#elif defined(KOKKOS_COMPILER_INTEL)
+  return full != i ? _bit_scan_forward(~i) : -1;
+#else
+  (void)full;
+  return bit_first_zero_fallback(i);
+#endif
+}
+
+KOKKOS_IMPL_HOST_FUNCTION
+inline int bit_first_zero_host(unsigned i) noexcept {
+  constexpr unsigned full = ~0u;
+#if defined(KOKKOS_COMPILER_INTEL)
+  return full != i ? _bit_scan_forward(~i) : -1;
+#elif defined(KOKKOS_COMPILER_CRAYC)
+  return full != i ? _popcnt(i ^ (i + 1)) - 1 : -1;
+#elif defined(KOKKOS_COMPILER_GNU) || defined(__GNUC__) || defined(__GNUG__)
+  return full != i ? __builtin_ffs(~i) - 1 : -1;
+#else
+  (void)full;
+  return bit_first_zero_fallback(i);
+#endif
+}
+
+#if defined(__EDG__) && !defined(KOKKOS_COMPILER_INTEL)
+#pragma push
+#pragma diag_suppress implicit_return_from_non_void_function
+#endif
+KOKKOS_FORCEINLINE_FUNCTION
+int bit_first_zero(unsigned i) noexcept {
+  KOKKOS_IF_ON_DEVICE((return bit_first_zero_device(i);))
+  KOKKOS_IF_ON_HOST((return bit_first_zero_host(i);))
+}
+#if defined(__EDG__) && !defined(KOKKOS_COMPILER_INTEL)
+#pragma pop
+#endif
+
+KOKKOS_FORCEINLINE_FUNCTION
+int bit_scan_forward_fallback(unsigned i) {
+  int offset = -1;
+  if (i) {
+    for (offset = 0; (i & (1 << offset)) == 0; ++offset)
+      ;
+  }
+  return offset;
+}
+
+KOKKOS_IMPL_DEVICE_FUNCTION inline int bit_scan_forward_device(unsigned i) {
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
+  return __ffs(i) - 1;
+#elif defined(KOKKOS_COMPILER_INTEL)
+  return _bit_scan_forward(i);
+#else
+  return bit_scan_forward_fallback(i);
+#endif
+}
+
+KOKKOS_IMPL_HOST_FUNCTION inline int bit_scan_forward_host(unsigned i) {
+#if defined(KOKKOS_COMPILER_INTEL)
+  return _bit_scan_forward(i);
+#elif defined(KOKKOS_COMPILER_CRAYC)
+  return i ? _popcnt(~i & (i - 1)) : -1;
+#elif defined(KOKKOS_COMPILER_GNU) || defined(__GNUC__) || defined(__GNUG__)
+  return __builtin_ffs(i) - 1;
+#else
+  return bit_scan_forward_fallback(i);
+#endif
+}
+
+#if defined(__EDG__) && !defined(KOKKOS_COMPILER_INTEL)
+#pragma push
+#pragma diag_suppress implicit_return_from_non_void_function
+#endif
+KOKKOS_FORCEINLINE_FUNCTION
+int bit_scan_forward(unsigned i) {
+  KOKKOS_IF_ON_DEVICE((return bit_scan_forward_device(i);))
+  KOKKOS_IF_ON_HOST((return bit_scan_forward_host(i);))
+}
+#if defined(__EDG__) && !defined(KOKKOS_COMPILER_INTEL)
+#pragma pop
+#endif
+
+/// Count the number of bits set.
+KOKKOS_FORCEINLINE_FUNCTION
+int bit_count_fallback(unsigned i) {
+  // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetNaive
+  i = i - ((i >> 1) & ~0u / 3u);                           // temp
+  i = (i & ~0u / 15u * 3u) + ((i >> 2) & ~0u / 15u * 3u);  // temp
+  i = (i + (i >> 4)) & ~0u / 255u * 15u;                   // temp
+
+  // count
+  return (int)((i * (~0u / 255u)) >> (sizeof(unsigned) - 1) * CHAR_BIT);
+}
+
+KOKKOS_IMPL_DEVICE_FUNCTION inline int bit_count_device(unsigned i) {
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
+  return __popc(i);
+#elif defined(KOKKOS_COMPILER_INTEL)
+  return _popcnt32(i);
+#else
+  return bit_count_fallback(i);
+#endif
+}
+
+KOKKOS_IMPL_HOST_FUNCTION inline int bit_count_host(unsigned i) {
+#if defined(KOKKOS_COMPILER_INTEL)
+  return _popcnt32(i);
+#elif defined(KOKKOS_COMPILER_CRAYC)
+  return _popcnt(i);
+#elif defined(__GNUC__) || defined(__GNUG__)
+  return __builtin_popcount(i);
+#else
+  return bit_count_fallback(i);
+#endif
+}
+
+#if defined(__EDG__) && !defined(KOKKOS_COMPILER_INTEL)
+#pragma push
+#pragma diag_suppress implicit_return_from_non_void_function
+#endif
+KOKKOS_FORCEINLINE_FUNCTION
+int bit_count(unsigned i) {
+  KOKKOS_IF_ON_DEVICE((return bit_count_device(i);))
+  KOKKOS_IF_ON_HOST((return bit_count_host(i);))
+}
+#if defined(__EDG__) && !defined(KOKKOS_COMPILER_INTEL)
+#pragma pop
+#endif
+
+KOKKOS_INLINE_FUNCTION
+unsigned integral_power_of_two_that_contains(const unsigned N) {
+  const unsigned i = int_log2(N);
+  return ((1u << i) < N) ? i + 1 : i;
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_BITOPS_HPP
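Each operation above has a portable fallback used when no compiler intrinsic applies, and the fallbacks can be validated against the intrinsics on any host. A self-contained check of the SWAR popcount, assuming a GCC/Clang-compatible host compiler (illustration):

#include <cassert>
#include <climits>

// Mirrors bit_count_fallback above; checked against the compiler builtin.
int bit_count_fallback(unsigned i) {
  i = i - ((i >> 1) & ~0u / 3u);                           // pairwise sums
  i = (i & ~0u / 15u * 3u) + ((i >> 2) & ~0u / 15u * 3u);  // nibble sums
  i = (i + (i >> 4)) & ~0u / 255u * 15u;                   // byte sums
  return (int)((i * (~0u / 255u)) >> (sizeof(unsigned) - 1) * CHAR_BIT);
}

int main() {
  for (unsigned i = 0; i < (1u << 20); ++i)
    assert(bit_count_fallback(i) == __builtin_popcount(i));
  return 0;
}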
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_CPUDiscovery.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_CPUDiscovery.cpp
new file mode 100644 (file)
index 0000000..a8fc928
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#elif defined(__APPLE__)
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#else
+#include <unistd.h>
+#endif
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <cerrno>
+#include <string>
+
+namespace Kokkos {
+namespace Impl {
+
+int processors_per_node() {
+#ifdef _SC_NPROCESSORS_ONLN
+  int const num_procs     = sysconf(_SC_NPROCESSORS_ONLN);
+  int const num_procs_max = sysconf(_SC_NPROCESSORS_CONF);
+  if ((num_procs < 1) || (num_procs_max < 1)) {
+    return -1;
+  }
+  return num_procs;
+#elif defined(__APPLE__)
+  int ncpu;
+  int activecpu;
+  size_t size = sizeof(int);
+  sysctlbyname("hw.ncpu", &ncpu, &size, nullptr, 0);
+  sysctlbyname("hw.activecpu", &activecpu, &size, nullptr, 0);
+  if (ncpu < 1 || activecpu < 1)
+    return -1;
+  else
+    return activecpu;
+#else
+  return -1;
+#endif
+}
+
+int mpi_ranks_per_node() {
+  char *str;
+  int ppn = 1;
+  // if ((str = getenv("SLURM_TASKS_PER_NODE"))) {
+  //  ppn = std::stoi(str);
+  //  if(ppn<=0) ppn = 1;
+  //}
+  if ((str = getenv("MV2_COMM_WORLD_LOCAL_SIZE"))) {
+    ppn = std::stoi(str);
+    if (ppn <= 0) ppn = 1;
+  }
+  if ((str = getenv("OMPI_COMM_WORLD_LOCAL_SIZE"))) {
+    ppn = std::stoi(str);
+    if (ppn <= 0) ppn = 1;
+  }
+  return ppn;
+}
+
+int mpi_local_rank_on_node() {
+  char *str;
+  int local_rank = 0;
+  // if ((str = getenv("SLURM_LOCALID"))) {
+  //  local_rank = std::stoi(str);
+  //}
+  if ((str = getenv("MV2_COMM_WORLD_LOCAL_RANK"))) {
+    local_rank = std::stoi(str);
+  }
+  if ((str = getenv("OMPI_COMM_WORLD_LOCAL_RANK"))) {
+    local_rank = std::stoi(str);
+  }
+  return local_rank;
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_CPUDiscovery.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_CPUDiscovery.hpp
new file mode 100644 (file)
index 0000000..6ba14c8
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+namespace Kokkos {
+namespace Impl {
+
+int processors_per_node();
+int mpi_ranks_per_node();
+int mpi_local_rank_on_node();
+
+}  // namespace Impl
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ChaseLev.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ChaseLev.hpp
new file mode 100644 (file)
index 0000000..1a372d8
--- /dev/null
@@ -0,0 +1,312 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// Experimental unified task-data parallel manycore LDRD
+
+#ifndef KOKKOS_IMPL_LOCKFREEDEQUE_HPP
+#define KOKKOS_IMPL_LOCKFREEDEQUE_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_TASKDAG
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_PointerOwnership.hpp>
+#include <impl/Kokkos_OptionalRef.hpp>
+#include <impl/Kokkos_Error.hpp>           // KOKKOS_EXPECTS
+#include <impl/Kokkos_LinkedListNode.hpp>  // KOKKOS_EXPECTS
+
+#include <Kokkos_Atomic.hpp>  // atomic_compare_exchange, atomic_fence
+#include "Kokkos_LIFO.hpp"
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class NodeType, size_t CircularBufferSize, class SizeType = size_t>
+struct fixed_size_circular_buffer {
+ public:
+  using node_type = NodeType;
+  using size_type = SizeType;
+
+ private:
+  node_type* m_buffer[CircularBufferSize] = {nullptr};
+
+ public:
+  fixed_size_circular_buffer()                                  = default;
+  fixed_size_circular_buffer(fixed_size_circular_buffer const&) = delete;
+  fixed_size_circular_buffer(fixed_size_circular_buffer&&)      = default;
+  fixed_size_circular_buffer& operator=(fixed_size_circular_buffer const&) =
+      delete;
+  fixed_size_circular_buffer& operator=(fixed_size_circular_buffer&&) = default;
+  ~fixed_size_circular_buffer()                                       = default;
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  static constexpr size_type size() noexcept {
+    return size_type(CircularBufferSize);
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  node_type* operator[](size_type idx) const noexcept {
+    return m_buffer[idx % size()];
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  node_type*& operator[](size_type idx) noexcept {
+    return m_buffer[idx % size()];
+  }
+};
+
+template <class NodeType, class SizeType = size_t>
+struct non_owning_variable_size_circular_buffer {
+ public:
+  using node_type = NodeType;
+  using size_type = SizeType;
+
+ private:
+  ObservingRawPtr<node_type*> m_buffer = nullptr;
+  size_type m_size                     = 0;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  non_owning_variable_size_circular_buffer(ObservingRawPtr<node_type*> buffer,
+                                           size_type arg_size) noexcept
+      : m_buffer(buffer), m_size(arg_size) {}
+
+  non_owning_variable_size_circular_buffer() = default;
+  non_owning_variable_size_circular_buffer(
+      non_owning_variable_size_circular_buffer const&) = delete;
+  non_owning_variable_size_circular_buffer(
+      non_owning_variable_size_circular_buffer&&)      = default;
+  non_owning_variable_size_circular_buffer& operator=(
+      non_owning_variable_size_circular_buffer const&) = delete;
+  non_owning_variable_size_circular_buffer& operator=(
+      non_owning_variable_size_circular_buffer&&) = default;
+  ~non_owning_variable_size_circular_buffer()          = default;
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  constexpr size_type size() const noexcept { return m_size; }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  node_type* operator[](size_type idx) const noexcept {
+    return m_buffer[idx % size()];
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  node_type*& operator[](size_type idx) noexcept {
+    return m_buffer[idx % size()];
+  }
+};
+
+/** Based on "Correct and Efficient Work-Stealing for Weak Memory Models,"
+ * PPoPP '13, https://www.di.ens.fr/~zappa/readings/ppopp13.pdf
+ *
+ */
+template <class T, class CircularBufferT, class SizeType = int32_t>
+struct ChaseLevDeque {
+ public:
+  using size_type  = SizeType;
+  using value_type = T;
+  // Still using intrusive linked list for waiting queue
+  using node_type = SimpleSinglyLinkedListNode<>;
+
+ private:
+  // TODO @tasking @new_feature DSH variable size circular buffer?
+
+  CircularBufferT m_array;
+  size_type m_top    = 0;
+  size_type m_bottom = 0;
+
+ public:
+  template <class _ignore = void,
+            class         = std::enable_if_t<
+                std::is_default_constructible<CircularBufferT>::value>>
+  ChaseLevDeque() : m_array() {}
+
+  explicit ChaseLevDeque(CircularBufferT buffer) : m_array(std::move(buffer)) {}
+
+  KOKKOS_INLINE_FUNCTION
+  bool empty() const {
+    // TODO @tasking @memory_order DSH memory order
+    return m_top > m_bottom - 1;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  OptionalRef<T> pop() {
+    auto b   = m_bottom - 1;  // atomic load relaxed
+    auto& a  = m_array;       // atomic load relaxed
+    m_bottom = b;             // atomic store relaxed
+    Kokkos::memory_fence();   // memory order seq_cst
+    auto t = m_top;           // atomic load relaxed
+    OptionalRef<T> return_value;
+    if (t <= b) {
+      /* non-empty queue */
+      return_value = *static_cast<T*>(a[b]);  // relaxed load
+      if (t == b) {
+        /* single last element in the queue. */
+#ifdef _WIN32
+        Kokkos::memory_fence();
+        bool const success =
+            Kokkos::atomic_compare_exchange_strong(&m_top, t, t + 1);
+        Kokkos::memory_fence();
+        if (!success) {
+          return_value = nullptr;
+        }
+#else
+        if (!Impl::atomic_compare_exchange_strong(
+                &m_top, t, t + 1, memory_order_seq_cst, memory_order_relaxed)) {
+          /* failed race, someone else stole it */
+          return_value = nullptr;
+        }
+#endif
+        m_bottom = b + 1;  // memory order relaxed
+      }
+    } else {
+      /* empty queue */
+      m_bottom = b + 1;  // memory order relaxed
+    }
+    return return_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool push(node_type&& node) {
+    // Just forward to the lvalue version
+    return push(node);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool push(node_type& node) {
+    auto b  = m_bottom;  // memory order relaxed
+    auto t  = Impl::atomic_load(&m_top, memory_order_acquire);
+    auto& a = m_array;
+    if (b - t > a.size() - 1) {
+      /* queue is full, resize */
+      // m_array = a->grow();
+      // a = m_array;
+      return false;
+    }
+    a[b] = &node;  // relaxed
+    Impl::atomic_store(&m_bottom, b + 1, memory_order_release);
+    return true;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  OptionalRef<T> steal() {
+    auto t = m_top;  // TODO @tasking @memory_order DSH: atomic load acquire
+    Kokkos::memory_fence();  // seq_cst fence, so why does the above need to be
+                             // acquire?
+    auto b = Impl::atomic_load(&m_bottom, memory_order_acquire);
+    OptionalRef<T> return_value;
+    if (t < b) {
+      /* Non-empty queue */
+      auto& a = m_array;     // TODO @tasking @memory_order DSH: technically
+                             // consume ordered, but acquire should be fine
+      Kokkos::load_fence();  // TODO @tasking @memory_order DSH memory order
+                             // instead of fence
+      return_value = *static_cast<T*>(a[t]);  // relaxed
+#ifdef _WIN32
+      Kokkos::memory_fence();
+      bool const success =
+          Kokkos::atomic_compare_exchange_strong(&m_top, t, t + 1);
+      Kokkos::memory_fence();
+      if (!success) {
+        return_value = nullptr;
+      }
+#else
+      if (!Impl::atomic_compare_exchange_strong(
+              &m_top, t, t + 1, memory_order_seq_cst, memory_order_relaxed)) {
+        return_value = nullptr;
+      }
+#endif
+    }
+    return return_value;
+  }
+};
+
+/*
+      // The atomicity of this load was more important in the paper's version
+      // because that version had a circular buffer that could grow.  We're
+      // essentially using the memory order in this version as a fence, which
+      // may be unnecessary
+      auto buffer_ptr = (node_type***)&m_array.buffer;
+      auto a = Impl::atomic_load(buffer_ptr, memory_order_acquire); //
+   technically consume ordered, but acquire should be fine return_value =
+   *static_cast<T*>(a[t % m_array->size]); // relaxed; we'd have to replace the
+   m_array->size if we ever allow growth
+*/
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <size_t CircularBufferSize>
+struct TaskQueueTraitsChaseLev {
+  template <class Task>
+  using ready_queue_type =
+      ChaseLevDeque<Task,
+                    fixed_size_circular_buffer<SimpleSinglyLinkedListNode<>,
+                                               CircularBufferSize, int32_t>,
+                    int32_t>;
+
+  template <class Task>
+  using waiting_queue_type = SingleConsumeOperationLIFO<Task>;
+
+  template <class Task>
+  using intrusive_task_base_type = typename ready_queue_type<Task>::node_type;
+
+  static constexpr auto ready_queue_insertion_may_fail = true;
+};
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* defined KOKKOS_ENABLE_TASKDAG */
+#endif /* #ifndef KOKKOS_IMPL_LOCKFREEDEQUE_HPP */
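In the deque above, only the owning thread writes m_bottom (push/pop) while any thread may advance m_top (steal); the single contended point is the compare-exchange on m_top when owner and thief race for the last element. A reduced std::atomic sketch of the steal path, keeping the paper's orderings (illustration only; buffer wrap-around and the owner side are omitted):

#include <atomic>
#include <cstdint>

struct MiniDeque {
  std::atomic<int32_t> top{0}, bottom{0};
  void* slots[64] = {};

  void* steal() {
    int32_t t = top.load(std::memory_order_acquire);
    std::atomic_thread_fence(std::memory_order_seq_cst);
    int32_t b = bottom.load(std::memory_order_acquire);
    if (t >= b) return nullptr;  // observed empty
    void* item = slots[t % 64];  // speculative read, as in steal() above
    if (!top.compare_exchange_strong(t, t + 1, std::memory_order_seq_cst,
                                     std::memory_order_relaxed))
      return nullptr;  // lost the race to the owner or another thief
    return item;
  }
};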
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ClockTic.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ClockTic.hpp
new file mode 100644 (file)
index 0000000..c1cb6a7
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CLOCKTIC_HPP
+#define KOKKOS_CLOCKTIC_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <stdint.h>
+#include <chrono>
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+#include <omp.h>
+#endif
+
+// To use OpenCL(TM) built-in intrinsics inside kernels, we have to
+// forward-declare their prototype, also see
+// https://github.com/intel/pti-gpu/blob/master/chapters/binary_instrumentation/OpenCLBuiltIn.md
+#if defined(KOKKOS_ENABLE_SYCL) && defined(KOKKOS_ARCH_INTEL_GPU) && \
+    defined(__SYCL_DEVICE_ONLY__)
+extern SYCL_EXTERNAL unsigned long __attribute__((overloadable))
+intel_get_cycle_counter();
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+/**\brief  Quick query of clock register tics
+ *
+ *  Primary use case is to, with low overhead,
+ *  obtain an integral value that consistently varies
+ *  across concurrent threads of execution within
+ *  a parallel algorithm.
+ *  This value is often used to "randomly" seed an
+ *  attempt to acquire an indexed resource (e.g., bit)
+ *  from an array of resources (e.g., bitset) such that
+ *  concurrent threads will have high likelihood of
+ *  having different index-seed values.
+ */
+
+KOKKOS_IMPL_DEVICE_FUNCTION inline uint64_t clock_tic_device() noexcept {
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP)
+
+  // Return value of 64-bit hi-res clock register.
+  return clock64();
+
+#elif defined(KOKKOS_ENABLE_SYCL) && defined(KOKKOS_ARCH_INTEL_GPU) && \
+    defined(__SYCL_DEVICE_ONLY__)
+
+  return intel_get_cycle_counter();
+
+#elif defined(KOKKOS_ENABLE_OPENMPTARGET)
+
+  return omp_get_wtime() * 1.e9;
+
+#else
+
+  return 0;
+
+#endif
+}
+
+KOKKOS_IMPL_HOST_FUNCTION inline uint64_t clock_tic_host() noexcept {
+#if defined(__i386__) || defined(__x86_64)
+
+  // Return value of 64-bit hi-res clock register.
+
+  unsigned a = 0, d = 0;
+
+  __asm__ volatile("rdtsc" : "=a"(a), "=d"(d));
+
+  return ((uint64_t)a) | (((uint64_t)d) << 32);
+
+#elif defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) || \
+    defined(__POWERPC__) || defined(__ppc__) || defined(__ppc64__)
+
+  unsigned int cycles = 0;
+
+  asm volatile("mftb %0" : "=r"(cycles));
+
+  return (uint64_t)cycles;
+
+#else
+
+  return std::chrono::high_resolution_clock::now().time_since_epoch().count();
+
+#endif
+}
+
+KOKKOS_FORCEINLINE_FUNCTION
+uint64_t clock_tic() noexcept {
+  KOKKOS_IF_ON_DEVICE((return clock_tic_device();))
+  KOKKOS_IF_ON_HOST((return clock_tic_host();))
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_CLOCKTIC_HPP
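clock_tic() is not a timer: it is a cheap source of values that differ across concurrent threads, used to seed searches through shared resources as the comment above describes. A sketch of such a consumer, hashing the tic into a starting slot (the helper is hypothetical; the mixing constants are borrowed from the MurmurHash3 finalizer):

#include <cstdint>
#include <impl/Kokkos_ClockTic.hpp>

// Hypothetical helper: pick a per-thread starting slot in [0, capacity).
inline uint32_t start_slot(uint32_t capacity) {
  uint64_t tic = Kokkos::Impl::clock_tic();
  tic ^= tic >> 33;  // mix so that low bits also vary between threads
  tic *= 0xff51afd7ed558ccdULL;
  tic ^= tic >> 33;
  return static_cast<uint32_t>(tic % capacity);
}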
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Combined_Reducer.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Combined_Reducer.hpp
new file mode 100644 (file)
index 0000000..21a2029
--- /dev/null
@@ -0,0 +1,682 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_COMBINED_REDUCER_HPP
+#define KOKKOS_COMBINED_REDUCER_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_Parallel_Reduce.hpp>
+#include <Kokkos_ExecPolicy.hpp>
+#include <Kokkos_AnonymousSpace.hpp>
+#include <impl/Kokkos_Utilities.hpp>  // comma operator fold emulation
+
+#include <utility>
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="CombinedReducer reducer and value storage helpers"> {{{1
+
+// Note: the index is only to avoid repeating the same base class multiple times
+template <size_t /*Idx*/, class ValueType>
+struct CombinedReducerValueItemImpl {
+ public:
+  using value_type = ValueType;
+
+ private:
+  value_type m_value;
+
+ public:
+  KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerValueItemImpl() = default;
+  KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerValueItemImpl(
+      CombinedReducerValueItemImpl const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerValueItemImpl(
+      CombinedReducerValueItemImpl&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerValueItemImpl& operator=(
+      CombinedReducerValueItemImpl const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerValueItemImpl& operator=(
+      CombinedReducerValueItemImpl&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  ~CombinedReducerValueItemImpl() = default;
+  explicit KOKKOS_FUNCTION CombinedReducerValueItemImpl(value_type arg_value)
+      : m_value(std::move(arg_value)) {}
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  constexpr value_type& ref() & noexcept { return m_value; }
+  KOKKOS_FORCEINLINE_FUNCTION
+  constexpr value_type const& ref() const& noexcept { return m_value; }
+};
+
+//==============================================================================
+
+template <class IdxSeq, class... ValueTypes>
+struct CombinedReducerValueImpl;
+
+template <size_t... Idxs, class... ValueTypes>
+struct CombinedReducerValueImpl<std::integer_sequence<size_t, Idxs...>,
+                                ValueTypes...>
+    : CombinedReducerValueItemImpl<Idxs, ValueTypes>... {
+ public:
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr CombinedReducerValueImpl() = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr CombinedReducerValueImpl(CombinedReducerValueImpl const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr CombinedReducerValueImpl(CombinedReducerValueImpl&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr CombinedReducerValueImpl& operator=(
+      CombinedReducerValueImpl const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr CombinedReducerValueImpl& operator=(CombinedReducerValueImpl&&) =
+      default;
+  KOKKOS_DEFAULTED_FUNCTION
+  ~CombinedReducerValueImpl() = default;
+
+  KOKKOS_FUNCTION
+  explicit CombinedReducerValueImpl(ValueTypes... arg_values)
+      : CombinedReducerValueItemImpl<Idxs, ValueTypes>(
+            std::move(arg_values))... {}
+
+  template <size_t Idx, class ValueType>
+      KOKKOS_INLINE_FUNCTION ValueType& get() & noexcept {
+    return this->CombinedReducerValueItemImpl<Idx, ValueType>::ref();
+  }
+  template <size_t Idx, class ValueType>
+  KOKKOS_INLINE_FUNCTION ValueType const& get() const& noexcept {
+    return this->CombinedReducerValueItemImpl<Idx, ValueType>::ref();
+  }
+};
+
+//==============================================================================
+
+// TODO Empty base optimization?
+template <size_t /*Idx*/, class Reducer>
+// requires Kokkos::is_reducer<Reducer>
+struct CombinedReducerStorageImpl {
+ public:
+  using value_type = typename Reducer::value_type;
+
+ private:
+  Reducer m_reducer;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  explicit constexpr CombinedReducerStorageImpl(Reducer arg_reducer)
+      : m_reducer(std::move(arg_reducer)) {}
+
+  // Leading underscores to make it clear that this class is not intended to
+  // model Reducer
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr _fold_comma_emulation_return _init(value_type& val) const {
+    m_reducer.init(val);
+    return _fold_comma_emulation_return{};
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr _fold_comma_emulation_return _join(
+      value_type& dest, value_type const& src) const {
+    m_reducer.join(dest, src);
+    return _fold_comma_emulation_return{};
+  }
+};
+
+// </editor-fold> end CombinedReducerStorage }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="CombinedReducer"> {{{1
+
+struct _construct_combined_reducer_from_args_tag {};
+
+template <class T>
+KOKKOS_INLINE_FUNCTION auto _get_value_from_combined_reducer_ctor_arg(
+    T&& arg) noexcept
+    -> std::enable_if_t<!is_view<std::decay_t<T>>::value &&
+                            !is_reducer<std::decay_t<T>>::value,
+                        std::decay_t<T>> {
+  return arg;
+}
+
+template <class T>
+KOKKOS_INLINE_FUNCTION auto _get_value_from_combined_reducer_ctor_arg(
+    T&&) noexcept ->
+    typename std::enable_if_t<is_view<std::decay_t<T>>::value ||
+                                  is_reducer<std::decay_t<T>>::value,
+                              std::decay_t<T>>::value_type {
+  return typename std::decay_t<T>::value_type{};
+}
+
+template <class IdxSeq, class Space, class...>
+struct CombinedReducerImpl;
+
+template <size_t... Idxs, class Space, class... Reducers>
+struct CombinedReducerImpl<std::integer_sequence<size_t, Idxs...>, Space,
+                           Reducers...>
+    : private CombinedReducerStorageImpl<Idxs, Reducers>... {
+ public:
+  using reducer = CombinedReducerImpl<std::integer_sequence<size_t, Idxs...>,
+                                      Space, Reducers...>;
+  using value_type =
+      CombinedReducerValueImpl<std::integer_sequence<size_t, Idxs...>,
+                               typename Reducers::value_type...>;
+  using result_view_type =
+      Kokkos::View<value_type, Space, Kokkos::MemoryUnmanaged>;
+
+ private:
+  result_view_type m_value_view;
+
+ public:
+  KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerImpl() = default;
+  KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerImpl(
+      CombinedReducerImpl const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerImpl(
+      CombinedReducerImpl&&)                                       = default;
+  KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerImpl& operator=(
+      CombinedReducerImpl const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION constexpr CombinedReducerImpl& operator=(
+      CombinedReducerImpl&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION ~CombinedReducerImpl() = default;
+
+  template <class... ReducersDeduced>
+  KOKKOS_FUNCTION constexpr explicit CombinedReducerImpl(
+      value_type& value, ReducersDeduced&&... reducers) noexcept
+      : CombinedReducerStorageImpl<Idxs, Reducers>((ReducersDeduced &&)
+                                                       reducers)...,
+        m_value_view(&value) {}
+
+  KOKKOS_FUNCTION constexpr void join(value_type& dest,
+                                      value_type const& src) const noexcept {
+    emulate_fold_comma_operator(
+        this->CombinedReducerStorageImpl<Idxs, Reducers>::_join(
+            dest.template get<Idxs, typename Reducers::value_type>(),
+            src.template get<Idxs, typename Reducers::value_type>())...);
+  }
+
+  KOKKOS_FUNCTION constexpr void init(value_type& dest) const noexcept {
+    emulate_fold_comma_operator(
+        this->CombinedReducerStorageImpl<Idxs, Reducers>::_init(
+            dest.template get<Idxs, typename Reducers::value_type>())...);
+  }
+
+  // TODO figure out if we also need to call through to final
+
+  KOKKOS_FUNCTION
+  constexpr bool references_scalar() const noexcept {
+    // For now, always pretend that we reference a scalar since we need to
+    // block to do the write-back because the references may not be contiguous
+    // in memory and the backends currently assume this and just do a single
+    // deep copy back to a chunk of memory associated with the output argument
+    return true;
+  }
+
+  KOKKOS_FUNCTION
+  constexpr result_view_type const& view() const noexcept {
+    return m_value_view;
+  }
+
+  template <class ExecutionSpace, int Idx, class View>
+  static void write_one_value_back(
+      const ExecutionSpace& exec_space, View const& view,
+      typename View::const_value_type& value) noexcept {
+    if (Kokkos::SpaceAccessibility<typename View::memory_space,
+                                   Space>::assignable)
+      view() = value;
+    else
+      Kokkos::deep_copy(exec_space, view, value);
+  }
+
+  template <class ExecutionSpace>
+  static void write_value_back_to_original_references(
+      const ExecutionSpace& exec_space, value_type const& value,
+      Reducers const&... reducers_that_reference_original_values) noexcept {
+    emulate_fold_comma_operator(
+        (write_one_value_back<ExecutionSpace, Idxs>(
+             exec_space, reducers_that_reference_original_values.view(),
+             value.template get<Idxs, typename Reducers::value_type>()),
+         0)...);
+  }
+};
+
+// Apparently this can't be an alias template because of a bug/unimplemented
+// feature in GCC's name mangler.  But in this case, this amounts to the same
+// thing.
+template <class Space, class... Reducers>
+struct CombinedReducer
+    : CombinedReducerImpl<std::make_index_sequence<sizeof...(Reducers)>, Space,
+                          Reducers...> {
+  using base_t =
+      CombinedReducerImpl<std::make_index_sequence<sizeof...(Reducers)>, Space,
+                          Reducers...>;
+  using base_t::base_t;
+  using reducer = CombinedReducer<Space, Reducers...>;
+};
+
+// </editor-fold> end CombinedReducer }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="CombinedReductionFunctorWrapper"> {{{1
+
+template <class IdxSeq, class Functor, class Space, class... Reducers>
+struct CombinedReductionFunctorWrapperImpl;
+
+template <size_t... Idxs, class Functor, class Space, class... Reducers>
+struct CombinedReductionFunctorWrapperImpl<
+    std::integer_sequence<size_t, Idxs...>, Functor, Space, Reducers...> {
+ private:
+  Functor m_functor;
+
+ public:
+  //------------------------------------------------------------------------------
+  // <editor-fold desc="type aliases"> {{{2
+
+  using reducer_type = CombinedReducer<Space, Reducers...>;
+
+  // Prevent Kokkos from attempting to deduce value_type
+  using value_type = typename reducer_type::value_type;
+
+  // </editor-fold> end type aliases }}}2
+  //------------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Ctors, destructor, and assignment"> {{{2
+
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr CombinedReductionFunctorWrapperImpl() noexcept = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr CombinedReductionFunctorWrapperImpl(
+      CombinedReductionFunctorWrapperImpl const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr CombinedReductionFunctorWrapperImpl(
+      CombinedReductionFunctorWrapperImpl&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr CombinedReductionFunctorWrapperImpl& operator=(
+      CombinedReductionFunctorWrapperImpl const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr CombinedReductionFunctorWrapperImpl& operator=(
+      CombinedReductionFunctorWrapperImpl&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  ~CombinedReductionFunctorWrapperImpl() = default;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr explicit CombinedReductionFunctorWrapperImpl(Functor arg_functor)
+      : m_functor(std::move(arg_functor)) {}
+
+  // </editor-fold> end Ctors, destructor, and assignment }}}2
+  //----------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="call operator"> {{{2
+
+  // Variadic version for MDRangePolicy
+  // There are a number of ways to do this, but most of them that involve
+  // not assuming an implementation of tuple is available are gross.
+  // Unfortunately, that's what we have to do here
+  template <class IndexOrMemberOrTagType1,
+            class... IndexOrMemberTypesThenValueType>
+  KOKKOS_FUNCTION void operator()(
+      IndexOrMemberOrTagType1&& arg_first,
+      IndexOrMemberTypesThenValueType&&... args) const {
+    this->template _call_op_impl<IndexOrMemberOrTagType1&&>(
+        (IndexOrMemberOrTagType1 &&) arg_first,
+        (IndexOrMemberTypesThenValueType &&) args...);
+  }
+
+  // </editor-fold> end call operator }}}2
+  //----------------------------------------------------------------------------
+
+  // These are things that need to be done if we decide to ever support
+  // functor-customized join/init/final hooks with combined reducers. For now,
+  // they are explicitly not supported.
+  // TODO: forward join() function to user functor hook, or just ignore it?
+  // TODO: forward init() function to user functor hook, or just ignore it?
+  // TODO: forward final() function to user functor hook, or just ignore it?
+
+ private:
+  // variadic forwarding for MDRangePolicy
+  // see comment above for why this has to be so gross
+  // recursive case
+  template <class... IdxOrMemberTypes, class IdxOrMemberType1,
+            class... IdxOrMemberTypesThenValueType>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<
+      !std::is_same<remove_cvref_t<IdxOrMemberType1>, value_type>::value>
+  _call_op_impl(IdxOrMemberTypes&&... idxs, IdxOrMemberType1&& idx,
+                IdxOrMemberTypesThenValueType&&... args) const {
+    this->template _call_op_impl<IdxOrMemberTypes&&..., IdxOrMemberType1&&>(
+        (IdxOrMemberTypes &&) idxs..., (IdxOrMemberType1 &&) idx,
+        (IdxOrMemberTypesThenValueType &&) args...);
+  }
+
+  // base case
+  template <class... IdxOrMemberTypes>
+  KOKKOS_FORCEINLINE_FUNCTION void _call_op_impl(IdxOrMemberTypes&&... idxs,
+                                                 value_type& out) const {
+    m_functor((IdxOrMemberTypes &&) idxs...,
+              out.template get<Idxs, typename Reducers::value_type>()...);
+  }
+};
+
+template <class Functor, class Space, class... Reducers>
+struct CombinedReductionFunctorWrapper
+    : CombinedReductionFunctorWrapperImpl<
+          std::make_index_sequence<sizeof...(Reducers)>, Functor, Space,
+          Reducers...> {
+  using base_t = CombinedReductionFunctorWrapperImpl<
+      std::make_index_sequence<sizeof...(Reducers)>, Functor, Space,
+      Reducers...>;
+  using base_t::base_t;
+};
+
+// </editor-fold> end CombinedReductionFunctorWrapper }}}1
+//==============================================================================
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="_make_reducer_from_arg"> {{{2
+
+template <class Space, class Reducer>
+KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<
+    Kokkos::is_reducer<std::decay_t<Reducer>>::value, std::decay_t<Reducer>>
+_make_reducer_from_arg(Reducer&& arg_reducer) noexcept {
+  return arg_reducer;
+}
+
+// Two purposes: SFINAE-safety for the `View` case and laziness for the return
+// value otherwise to prevent extra instantiations of the Kokkos::Sum template
+template <class Space, class T, class Enable = void>
+struct _wrap_with_kokkos_sum {
+  using type = Kokkos::Sum<T, Space>;
+};
+
+template <class Space, class T>
+struct _wrap_with_kokkos_sum<Space, T,
+                             std::enable_if_t<Kokkos::is_view<T>::value>> {
+  using type = Kokkos::Sum<typename T::value_type, typename T::memory_space>;
+};
+
+// TODO better error message for the case when a const& to a scalar is passed in
+//      (this is needed in general, though)
+template <class Space, class T>
+KOKKOS_INLINE_FUNCTION constexpr typename std::enable_if_t<
+    !Kokkos::is_reducer<std::decay_t<T>>::value,
+    _wrap_with_kokkos_sum<Space, std::decay_t<T>>>::type
+_make_reducer_from_arg(T&& arg_scalar) noexcept {
+  return
+      typename _wrap_with_kokkos_sum<Space, std::decay_t<T>>::type{arg_scalar};
+}
+
+// This can't be an alias template because GCC doesn't know how to mangle
+// decltype expressions in return statements (and, even though every compiler
+// is supposed to, GCC is the only one that does dependent alias template
+// substitution correctly and tries to do the mangling, apparently).
+template <class Space, class ReferenceOrViewOrReducer, class = void>
+struct _reducer_from_arg {
+  using type = decltype(Impl::_make_reducer_from_arg<Space>(
+      std::declval<ReferenceOrViewOrReducer&&>()));
+};
+template <class Space, class ReferenceOrViewOrReducer>
+using _reducer_from_arg_t =
+    typename _reducer_from_arg<Space, ReferenceOrViewOrReducer>::type;
+
+// </editor-fold> end _make_reducer_from_arg }}}2
+//------------------------------------------------------------------------------
+
+template <class Space, class... ReferencesOrViewsOrReducers>
+KOKKOS_INLINE_FUNCTION constexpr auto make_combined_reducer_value(
+    ReferencesOrViewsOrReducers&&... args) {
+  //----------------------------------------
+  // This is a bit round-about and we should make sure it doesn't have
+  // any performance implications. Basically, we make a reducer out of anything
+  // just to get the value back out here (for the sake of uniformity). Most
+  // compilers should figure out what's going on, but we should double-check
+  // that.
+  return CombinedReducerValueImpl<
+      std::make_index_sequence<sizeof...(ReferencesOrViewsOrReducers)>,
+      typename _reducer_from_arg_t<Space,
+                                   ReferencesOrViewsOrReducers>::value_type...>{
+      // This helper function is now poorly named after refactoring.
+      _get_value_from_combined_reducer_ctor_arg((ReferencesOrViewsOrReducers &&)
+                                                    args)...};
+  //----------------------------------------
+}
+
+template <class Space, class ValueType, class... ReferencesOrViewsOrReducers>
+KOKKOS_INLINE_FUNCTION constexpr auto make_combined_reducer(
+    ValueType& value, ReferencesOrViewsOrReducers&&... args) {
+  //----------------------------------------
+  // This is doing more or less the same thing of making every argument into
+  // a reducer, just in a different place than in `make_combined_reducer_value`,
+  // so we should probably eventually make this read a little more similarly
+  using reducer_type = CombinedReducer<
+      Space, _reducer_from_arg_t<Space, ReferencesOrViewsOrReducers>...>;
+  return reducer_type(value,
+                      _reducer_from_arg_t<Space, ReferencesOrViewsOrReducers>{
+                          (ReferencesOrViewsOrReducers &&) args}...);
+  //----------------------------------------
+}
+
+template <class Functor, class Space, class... ReferencesOrViewsOrReducers>
+KOKKOS_INLINE_FUNCTION constexpr auto make_wrapped_combined_functor(
+    Functor const& functor, Space, ReferencesOrViewsOrReducers&&...) {
+  //----------------------------------------
+  return CombinedReductionFunctorWrapper<
+      Functor, Space,
+      _reducer_from_arg_t<Space, ReferencesOrViewsOrReducers>...>(functor);
+  //----------------------------------------
+}
+
+template <typename FunctorType>
+using functor_has_value_t = typename FunctorType::value_type;
+}  // end namespace Impl
+
+//==============================================================================
+// <editor-fold desc="Overloads of parallel_reduce for multiple outputs"> {{{1
+
+// These need to be forwarding references so that we can deduce const-ness,
+// but none of them should be forwarded (and, indeed, none of them should be
+// rvalue references)
+template <class PolicyType, class Functor, class ReturnType1, class ReturnType2,
+          class... ReturnTypes>
+auto parallel_reduce(std::string const& label, PolicyType const& policy,
+                     Functor const& functor, ReturnType1&& returnType1,
+                     ReturnType2&& returnType2,
+                     ReturnTypes&&... returnTypes) noexcept
+    -> std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value> {
+  //----------------------------------------
+  // Since we don't support asynchronous combined reducers yet for various
+  // reasons, we actually just want to work with the pointers and references
+  // directly
+  using space_type = Kokkos::DefaultHostExecutionSpace::memory_space;
+
+  auto value = Impl::make_combined_reducer_value<space_type>(
+      returnType1, returnType2, returnTypes...);
+
+  using combined_reducer_type = Impl::CombinedReducer<
+      space_type, Impl::_reducer_from_arg_t<space_type, ReturnType1>,
+      Impl::_reducer_from_arg_t<space_type, ReturnType2>,
+      Impl::_reducer_from_arg_t<space_type, ReturnTypes>...>;
+  auto combined_reducer = Impl::make_combined_reducer<space_type>(
+      value, returnType1, returnType2, returnTypes...);
+
+  auto combined_functor = Impl::make_wrapped_combined_functor(
+      functor, space_type{}, returnType1, returnType2, returnTypes...);
+
+  using combined_functor_type = decltype(combined_functor);
+  static_assert(
+      is_detected<Impl::functor_has_value_t, combined_functor_type>::value,
+      "value_type not properly detected");
+  using reduce_adaptor_t =
+      Impl::ParallelReduceAdaptor<PolicyType, combined_functor_type,
+                                  combined_reducer_type>;
+
+  reduce_adaptor_t::execute(label, policy, combined_functor, combined_reducer);
+  Impl::ParallelReduceFence<typename PolicyType::execution_space,
+                            combined_reducer_type>::
+      fence(
+          policy.space(),
+          "Kokkos::parallel_reduce: fence due to result being value, not view",
+          combined_reducer);
+  combined_reducer.write_value_back_to_original_references(
+      policy.space(), value,
+      Impl::_make_reducer_from_arg<space_type>(returnType1),
+      Impl::_make_reducer_from_arg<space_type>(returnType2),
+      Impl::_make_reducer_from_arg<space_type>(returnTypes)...);
+  policy.space().fence(
+      "Kokkos::parallel_reduce: fence after copying values back");
+  //----------------------------------------
+}
+
+template <class PolicyType, class Functor, class ReturnType1, class ReturnType2,
+          class... ReturnTypes>
+auto parallel_reduce(PolicyType const& policy, Functor const& functor,
+                     ReturnType1&& returnType1, ReturnType2&& returnType2,
+                     ReturnTypes&&... returnTypes) noexcept
+    -> std::enable_if_t<Kokkos::is_execution_policy<PolicyType>::value> {
+  //----------------------------------------
+  Kokkos::parallel_reduce("", policy, functor,
+                          std::forward<ReturnType1>(returnType1),
+                          std::forward<ReturnType2>(returnType2),
+                          std::forward<ReturnTypes>(returnTypes)...);
+  //----------------------------------------
+}
+
+template <class Functor, class ReturnType1, class ReturnType2,
+          class... ReturnTypes>
+void parallel_reduce(std::string const& label, size_t n, Functor const& functor,
+                     ReturnType1&& returnType1, ReturnType2&& returnType2,
+                     ReturnTypes&&... returnTypes) noexcept {
+  Kokkos::parallel_reduce(label,
+                          RangePolicy<Kokkos::DefaultExecutionSpace>(0, n),
+                          functor, std::forward<ReturnType1>(returnType1),
+                          std::forward<ReturnType2>(returnType2),
+                          std::forward<ReturnTypes>(returnTypes)...);
+}
+
+template <class Functor, class ReturnType1, class ReturnType2,
+          class... ReturnTypes>
+void parallel_reduce(size_t n, Functor const& functor,
+                     ReturnType1&& returnType1, ReturnType2&& returnType2,
+                     ReturnTypes&&... returnTypes) noexcept {
+  Kokkos::parallel_reduce("", n, functor,
+                          std::forward<ReturnType1>(returnType1),
+                          std::forward<ReturnType2>(returnType2),
+                          std::forward<ReturnTypes>(returnTypes)...);
+}
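+
+// A minimal usage sketch of the multiple-output overloads above. The functor
+// body and the View `data` are hypothetical, not part of this file:
+//
+//   double sum   = 0.0;
+//   int    count = 0;
+//   Kokkos::parallel_reduce(
+//       "sum_and_count", n,
+//       KOKKOS_LAMBDA(int i, double& partial_sum, int& partial_count) {
+//         partial_sum += data(i);
+//         if (data(i) > 0) ++partial_count;
+//       },
+//       sum, count);
+//
+// Each trailing output may be a scalar reference (wrapped in Kokkos::Sum by
+// _make_reducer_from_arg), a View, or an explicit reducer such as
+// Kokkos::Max<double>(max_value).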
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="Team overloads"> {{{2
+
+// Copied three times because that's the best way we have right now to match
+// Impl::TeamThreadRangeBoundariesStruct,
+// Impl::ThreadVectorRangeBoundariesStruct, and
+// Impl::TeamVectorRangeBoundariesStruct.
+// TODO make these work after restructuring
+
+// template <class iType, class MemberType, class Functor, class ReturnType1,
+//          class ReturnType2, class... ReturnTypes>
+// KOKKOS_INLINE_FUNCTION void parallel_reduce(
+//    std::string const& label,
+//    Impl::TeamThreadRangeBoundariesStruct<iType, MemberType> const&
+//    boundaries, Functor const& functor, ReturnType1&& returnType1,
+//    ReturnType2&& returnType2, ReturnTypes&&... returnTypes) noexcept {
+//  const auto combined_reducer =
+//      Impl::make_combined_reducer<Kokkos::AnonymousSpace>(
+//          returnType1, returnType2, returnTypes...);
+//
+//  auto combined_functor = Impl::make_wrapped_combined_functor(
+//      functor, Kokkos::AnonymousSpace{}, returnType1, returnType2,
+//      returnTypes...);
+//
+//  parallel_reduce(label, boundaries, combined_functor, combined_reducer);
+//}
+//
+// template <class iType, class MemberType, class Functor, class ReturnType1,
+//          class ReturnType2, class... ReturnTypes>
+// KOKKOS_INLINE_FUNCTION void parallel_reduce(
+//    std::string const& label,
+//    Impl::ThreadVectorRangeBoundariesStruct<iType, MemberType> const&
+//        boundaries,
+//    Functor const& functor, ReturnType1&& returnType1,
+//    ReturnType2&& returnType2, ReturnTypes&&... returnTypes) noexcept {
+//  const auto combined_reducer =
+//      Impl::make_combined_reducer<Kokkos::AnonymousSpace>(
+//          returnType1, returnType2, returnTypes...);
+//
+//  auto combined_functor = Impl::make_wrapped_combined_functor(
+//      functor, Kokkos::AnonymousSpace{}, returnType1, returnType2,
+//      returnTypes...);
+//
+//  parallel_reduce(label, boundaries, combined_functor, combined_reducer);
+//}
+
+// template <class iType, class MemberType, class Functor, class ReturnType1,
+//          class ReturnType2, class... ReturnTypes>
+// KOKKOS_INLINE_FUNCTION void parallel_reduce(
+//    std::string const& label,
+//    Impl::TeamVectorRangeBoundariesStruct<iType, MemberType> const&
+//    boundaries, Functor const& functor, ReturnType1&& returnType1,
+//    ReturnType2&& returnType2, ReturnTypes&&... returnTypes) noexcept {
+//  const auto combined_reducer =
+//      Impl::make_combined_reducer<Kokkos::AnonymousSpace>(
+//          returnType1, returnType2, returnTypes...);
+//
+//  auto combined_functor = Impl::make_wrapped_combined_functor(
+//      functor, Kokkos::AnonymousSpace{}, returnType1, returnType2,
+//      returnTypes...);
+//
+//  parallel_reduce(label, boundaries, combined_functor, combined_reducer);
+//}
+
+// </editor-fold> end Team overloads }}}2
+//------------------------------------------------------------------------------
+
+// </editor-fold> end Overloads of parallel_reduce for multiple outputs }}}1
+//==============================================================================
+
+}  // namespace Kokkos
+
+#endif  // KOKKOS_COMBINED_REDUCER_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Command_Line_Parsing.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Command_Line_Parsing.cpp
new file mode 100644 (file)
index 0000000..ca56352
--- /dev/null
@@ -0,0 +1,292 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <impl/Kokkos_Command_Line_Parsing.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+#include <cctype>
+#include <cerrno>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <iterator>
+#include <regex>
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace {
+
+auto const regex_true = std::regex(
+    "(yes|true|1)", std::regex_constants::icase | std::regex_constants::egrep);
+
+auto const regex_false = std::regex(
+    "(no|false|0)", std::regex_constants::icase | std::regex_constants::egrep);
+
+}  // namespace
+
+bool Kokkos::Impl::is_unsigned_int(const char* str) {
+  const size_t len = strlen(str);
+  for (size_t i = 0; i < len; ++i) {
+    if (!isdigit(str[i])) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool Kokkos::Impl::check_arg(char const* arg, char const* expected) {
+  std::size_t arg_len = std::strlen(arg);
+  std::size_t exp_len = std::strlen(expected);
+  if (arg_len < exp_len) return false;
+  if (std::strncmp(arg, expected, exp_len) != 0) return false;
+  if (arg_len == exp_len) return true;
+
+  if (std::isalnum(arg[exp_len]) || arg[exp_len] == '-' ||
+      arg[exp_len] == '_') {
+    return false;
+  }
+  return true;
+}
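+
+// For illustration (hypothetical arguments):
+// check_arg("--kokkos-num-threads=4", "--kokkos-num-threads") and
+// check_arg("--kokkos-help", "--kokkos-help") both return true, while
+// check_arg("--kokkos-helper", "--kokkos-help") returns false because the
+// character following the expected prefix is alphanumeric.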
+
+bool Kokkos::Impl::check_env_bool(char const* name, bool& val) {
+  char const* var = std::getenv(name);
+
+  if (!var) {
+    return false;
+  }
+
+  if (std::regex_match(var, regex_true)) {
+    val = true;
+    return true;
+  }
+
+  if (!std::regex_match(var, regex_false)) {
+    std::stringstream ss;
+    ss << "Error: cannot convert environment variable '" << name << "=" << var
+       << "' to a boolean."
+       << " Raised by Kokkos::initialize().\n";
+    Kokkos::abort(ss.str().c_str());
+  }
+
+  val = false;
+  return true;
+}
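+
+// For example, with KOKKOS_DISABLE_WARNINGS=yes (or TRUE, or 1) in the
+// environment, check_env_bool("KOKKOS_DISABLE_WARNINGS", val) sets val to
+// true and returns true. An unset variable returns false without touching
+// val; a value matching neither regex_true nor regex_false aborts.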
+
+bool Kokkos::Impl::check_env_int(char const* name, int& val) {
+  char const* var = std::getenv(name);
+
+  if (!var) {
+    return false;
+  }
+
+  errno = 0;
+  char* var_end;
+  val = std::strtol(var, &var_end, 10);
+
+  if (var == var_end) {
+    std::stringstream ss;
+    ss << "Error: cannot convert environment variable '" << name << '=' << var
+       << "' to an integer."
+       << " Raised by Kokkos::initialize().\n";
+    Kokkos::abort(ss.str().c_str());
+  }
+
+  if (errno == ERANGE) {
+    std::stringstream ss;
+    ss << "Error: converted value for environment variable '" << name << '='
+       << var << "' falls out of range."
+       << " Raised by Kokkos::initialize().\n";
+    Kokkos::abort(ss.str().c_str());
+  }
+
+  return true;
+}
+
+bool Kokkos::Impl::check_arg_bool(char const* arg, char const* name,
+                                  bool& val) {
+  auto const len = std::strlen(name);
+  if (std::strncmp(arg, name, len) != 0) {
+    return false;
+  }
+  auto const arg_len = strlen(arg);
+  if (arg_len == len) {
+    val = true;  // --kokkos-foo without =BOOL interpreted as foo=true
+    return true;
+  }
+  if (arg_len <= len + 1 || arg[len] != '=') {
+    std::stringstream ss;
+    ss << "Error: command line argument '" << arg
+       << "' is not recognized as a valid boolean."
+       << " Raised by Kokkos::initialize().\n";
+    Kokkos::abort(ss.str().c_str());
+  }
+
+  std::advance(arg, len + 1);
+  if (std::regex_match(arg, regex_true)) {
+    val = true;
+    return true;
+  }
+  if (!std::regex_match(arg, regex_false)) {
+    std::stringstream ss;
+    ss << "Error: cannot convert command line argument '" << name << "=" << arg
+       << "' to a boolean."
+       << " Raised by Kokkos::initialize().\n";
+    Kokkos::abort(ss.str().c_str());
+  }
+  val = false;
+  return true;
+}
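+
+// For illustration: check_arg_bool("--kokkos-disable-warnings",
+// "--kokkos-disable-warnings", val) sets val to true (no '=BOOL' suffix),
+// "--kokkos-disable-warnings=false" sets it to false, and
+// "--kokkos-disable-warnings=maybe" aborts. check_arg_int and check_arg_str
+// below parse the same "--name=value" shape.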
+
+bool Kokkos::Impl::check_arg_int(char const* arg, char const* name, int& val) {
+  auto const len = std::strlen(name);
+  if (std::strncmp(arg, name, len) != 0) {
+    return false;
+  }
+  auto const arg_len = strlen(arg);
+  if (arg_len <= len + 1 || arg[len] != '=') {
+    std::stringstream ss;
+    ss << "Error: command line argument '" << arg
+       << "' is not recognized as a valid integer."
+       << " Raised by Kokkos::initialize().\n";
+    Kokkos::abort(ss.str().c_str());
+  }
+
+  std::advance(arg, len + 1);
+
+  errno = 0;
+  char* arg_end;
+  val = std::strtol(arg, &arg_end, 10);
+
+  if (arg == arg_end) {
+    std::stringstream ss;
+    ss << "Error: cannot convert command line argument '" << name << '=' << arg
+       << "' to an integer."
+       << " Raised by Kokkos::initialize().\n";
+    Kokkos::abort(ss.str().c_str());
+  }
+
+  if (errno == ERANGE) {
+    std::stringstream ss;
+    ss << "Error: converted value for command line argument '" << name << '='
+       << arg << "' falls out of range."
+       << " Raised by Kokkos::initialize().\n";
+    Kokkos::abort(ss.str().c_str());
+  }
+
+  return true;
+}
+
+bool Kokkos::Impl::check_arg_str(char const* arg, char const* name,
+                                 std::string& val) {
+  auto const len = std::strlen(name);
+  if (std::strncmp(arg, name, len) != 0) {
+    return false;
+  }
+  auto const arg_len = strlen(arg);
+  if (arg_len <= len + 1 || arg[len] != '=') {
+    std::stringstream ss;
+    ss << "Error: command line argument '" << arg
+       << "' is not recognized as a valid string."
+       << " Raised by Kokkos::initialize().\n";
+    Kokkos::abort(ss.str().c_str());
+  }
+
+  std::advance(arg, len + 1);
+
+  val = arg;
+  return true;
+}
+
+void Kokkos::Impl::warn_deprecated_environment_variable(
+    std::string deprecated) {
+  std::cerr << "Warning: environment variable '" << deprecated
+            << "' is deprecated."
+            << " Raised by Kokkos::initialize()." << std::endl;
+}
+
+void Kokkos::Impl::warn_deprecated_environment_variable(
+    std::string deprecated, std::string use_instead) {
+  std::cerr << "Warning: environment variable '" << deprecated
+            << "' is deprecated."
+            << " Use '" << use_instead << "' instead."
+            << " Raised by Kokkos::initialize()." << std::endl;
+}
+
+void Kokkos::Impl::warn_deprecated_command_line_argument(
+    std::string deprecated) {
+  std::cerr << "Warning: command line argument '" << deprecated
+            << "' is deprecated."
+            << " Raised by Kokkos::initialize()." << std::endl;
+}
+
+void Kokkos::Impl::warn_deprecated_command_line_argument(
+    std::string deprecated, std::string use_instead) {
+  std::cerr << "Warning: command line argument '" << deprecated
+            << "' is deprecated."
+            << " Use '" << use_instead << "' instead."
+            << " Raised by Kokkos::initialize()." << std::endl;
+}
+
+namespace {
+std::vector<std::regex> do_not_warn_regular_expressions{
+    std::regex{"--kokkos-tool.*", std::regex::egrep},
+};
+}
+
+void Kokkos::Impl::do_not_warn_not_recognized_command_line_argument(
+    std::regex ignore) {
+  do_not_warn_regular_expressions.push_back(std::move(ignore));
+}
+
+void Kokkos::Impl::warn_not_recognized_command_line_argument(
+    std::string not_recognized) {
+  for (auto const& ignore : do_not_warn_regular_expressions) {
+    if (std::regex_match(not_recognized, ignore)) {
+      return;
+    }
+  }
+  std::cerr << "Warning: command line argument '" << not_recognized
+            << "' is not recognized."
+            << " Raised by Kokkos::initialize()." << std::endl;
+}
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Command_Line_Parsing.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Command_Line_Parsing.hpp
new file mode 100644 (file)
index 0000000..b22bc3e
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_COMMAND_LINE_PARSING_HPP
+#define KOKKOS_COMMAND_LINE_PARSING_HPP
+
+#include <string>
+#include <regex>
+
+namespace Kokkos {
+namespace Impl {
+bool is_unsigned_int(const char* str);
+bool check_arg(char const* arg, char const* expected);
+bool check_arg_bool(char const* arg, char const* name, bool& val);
+bool check_arg_int(char const* arg, char const* name, int& val);
+bool check_arg_str(char const* arg, char const* name, std::string& val);
+bool check_env_bool(char const* name, bool& val);
+bool check_env_int(char const* name, int& val);
+void warn_deprecated_environment_variable(std::string deprecated);
+void warn_deprecated_environment_variable(std::string deprecated,
+                                          std::string use_instead);
+void warn_deprecated_command_line_argument(std::string deprecated);
+void warn_deprecated_command_line_argument(std::string deprecated,
+                                           std::string use_instead);
+void warn_not_recognized_command_line_argument(std::string not_recognized);
+void do_not_warn_not_recognized_command_line_argument(std::regex ignore);
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ConcurrentBitset.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ConcurrentBitset.hpp
new file mode 100644 (file)
index 0000000..dafe57f
--- /dev/null
@@ -0,0 +1,350 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CONCURRENTBITSET_HPP
+#define KOKKOS_CONCURRENTBITSET_HPP
+
+#include <stdint.h>
+#include <Kokkos_Atomic.hpp>
+#include <impl/Kokkos_BitOps.hpp>
+#include <impl/Kokkos_ClockTic.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+struct concurrent_bitset {
+ public:
+  // 32 bits per integer value
+
+  enum : uint32_t { bits_per_int_lg2 = 5 };
+  enum : uint32_t { bits_per_int_mask = (1 << bits_per_int_lg2) - 1 };
+
+  // Buffer is uint32_t[ buffer_bound ]
+  //   [ uint32_t { state_header | used_count } , uint32_t bits[*] ]
+  //
+  //  Maximum bit count is 33 million (1u<<25):
+  //
+  //  - Maximum bit set size occupies 2^20 uint32_t words (4 Mbyte)
+  //
+  //  - State header can occupy bits [30-26]
+  //    which can be the bit_count_lg2
+  //
+  //  - Accept at least 33 million concurrent calls to 'acquire'
+  //    before risking an overflow race condition on a full bitset.
+
+  enum : uint32_t { max_bit_count_lg2 = 25 };
+  enum : uint32_t { max_bit_count = 1u << max_bit_count_lg2 };
+  enum : uint32_t { state_shift = 26 };
+  enum : uint32_t { state_used_mask = (1 << state_shift) - 1 };
+  enum : uint32_t { state_header_mask = uint32_t(0x001f) << state_shift };
+
+  KOKKOS_INLINE_FUNCTION static constexpr uint32_t buffer_bound_lg2(
+      uint32_t const bit_bound_lg2) noexcept {
+    return bit_bound_lg2 <= max_bit_count_lg2
+               ? 1 + (1u << (bit_bound_lg2 > bits_per_int_lg2
+                                 ? bit_bound_lg2 - bits_per_int_lg2
+                                 : 0))
+               : 0;
+  }
+
+  /**\brief  Size of the bitset buffer, in uint32_t words */
+  KOKKOS_INLINE_FUNCTION static constexpr uint32_t buffer_bound(
+      uint32_t const bit_bound) noexcept {
+    return bit_bound <= max_bit_count
+               ? 1 + (bit_bound >> bits_per_int_lg2) +
+                     (bit_bound & bits_per_int_mask ? 1 : 0)
+               : 0;
+  }
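+
+  // Worked example (illustrative): buffer_bound(100) = 1 header word
+  // + (100 >> 5) = 3 full words + 1 word for the remaining 100 & 31 = 4
+  // bits, i.e. 5 uint32_t in total.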
+
+  /**\brief  Claim any bit within the bitset bound.
+   *
+   *  Return : ( which_bit , bit_count )
+   *
+   *  if success then
+   *    bit_count is the atomic-count of claimed > 0
+   *    which_bit is the claimed bit >= 0
+   *  else if attempt failed due to filled buffer
+   *    bit_count == which_bit == -1
+   *  else if attempt failed due to non-matching state_header
+   *    bit_count == which_bit == -2
+   *  else if attempt failed due to max_bit_count_lg2 < bit_bound_lg2
+   *                             or invalid state_header
+   *                             or (1u << bit_bound_lg2) <= bit
+   *    bit_count == which_bit == -3
+   *  endif
+   *
+   *  Recommended to have hint
+   *    bit = Kokkos::Impl::clock_tic() & ((1u<<bit_bound_lg2) - 1)
+   */
+  KOKKOS_INLINE_FUNCTION static Kokkos::pair<int, int> acquire_bounded_lg2(
+      uint32_t volatile *const buffer, uint32_t const bit_bound_lg2,
+      uint32_t bit = 0 /* optional hint */
+      ,
+      uint32_t const state_header = 0 /* optional header */
+      ) noexcept {
+    using type = Kokkos::pair<int, int>;
+
+    const uint32_t bit_bound  = 1 << bit_bound_lg2;
+    const uint32_t word_count = bit_bound >> bits_per_int_lg2;
+
+    if ((max_bit_count_lg2 < bit_bound_lg2) ||
+        (state_header & ~state_header_mask) || (bit_bound < bit)) {
+      return type(-3, -3);
+    }
+
+    // Use potentially two fetch_add to avoid a CAS loop.
+    // Could generate a "racing" failure-to-acquire
+    // when the bitset is full at the atomic_fetch_add(+1)
+    // and a release occurs before the atomic_fetch_add(-1).
+
+    const uint32_t state = (uint32_t)Kokkos::atomic_fetch_add(
+        reinterpret_cast<volatile int *>(buffer), 1);
+
+    const uint32_t state_error = state_header != (state & state_header_mask);
+
+    const uint32_t state_bit_used = state & state_used_mask;
+
+    if (state_error || (bit_bound <= state_bit_used)) {
+      Kokkos::atomic_fetch_add(reinterpret_cast<volatile int *>(buffer), -1);
+      return state_error ? type(-2, -2) : type(-1, -1);
+    }
+
+    // Do not update bit until count is visible:
+
+    Kokkos::memory_fence();
+
+    // There is a zero bit available somewhere,
+    // now find the (first) available bit and set it.
+
+    while (1) {
+      const uint32_t word = bit >> bits_per_int_lg2;
+      const uint32_t mask = 1u << (bit & bits_per_int_mask);
+      const uint32_t prev = Kokkos::atomic_fetch_or(buffer + word + 1, mask);
+
+      if (!(prev & mask)) {
+        // Successfully claimed 'result.first' by
+        // atomically setting that bit.
+        return type(bit, state_bit_used + 1);
+      }
+
+      // Failed race to set the selected bit
+      // Find a new bit to try.
+
+      const int j = Kokkos::Impl::bit_first_zero(prev);
+
+      if (0 <= j) {
+        bit = (word << bits_per_int_lg2) | uint32_t(j);
+      } else {
+        bit = ((word + 1) < word_count ? ((word + 1) << bits_per_int_lg2) : 0) |
+              (bit & bits_per_int_mask);
+      }
+    }
+  }
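+
+  // Usage sketch (the buffer below is hypothetical): for a bitset of
+  // 1 << 10 bits,
+  //
+  //   uint32_t hint = Kokkos::Impl::clock_tic() & ((1u << 10) - 1);
+  //   auto result = concurrent_bitset::acquire_bounded_lg2(buffer, 10, hint);
+  //   if (result.first >= 0) { /* claimed bit 'result.first' */ }
+  //
+  // Scattering the starting hint via clock_tic() reduces contention when
+  // many threads acquire concurrently, as recommended above.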
+
+  /**\brief  Claim any bit within the bitset bound.
+   *
+   *  Return : ( which_bit , bit_count )
+   *
+   *  if success then
+   *    bit_count is the atomic-count of claimed > 0
+   *    which_bit is the claimed bit >= 0
+   *  else if attempt failed due to filled buffer
+   *    bit_count == which_bit == -1
+   *  else if attempt failed due to non-matching state_header
+   *    bit_count == which_bit == -2
+   *  else if attempt failed due to max_bit_count_lg2 < bit_bound_lg2
+   *                             or invalid state_header
+   *                             or bit_bound <= bit
+   *    bit_count == which_bit == -3
+   *  endif
+   *
+   *  Recommended to have hint
+   *    bit = Kokkos::Impl::clock_tic() % bit_bound
+   */
+  KOKKOS_INLINE_FUNCTION static Kokkos::pair<int, int> acquire_bounded(
+      uint32_t volatile *const buffer, uint32_t const bit_bound,
+      uint32_t bit = 0 /* optional hint */
+      ,
+      uint32_t const state_header = 0 /* optional header */
+      ) noexcept {
+    using type = Kokkos::pair<int, int>;
+
+    if ((max_bit_count < bit_bound) || (state_header & ~state_header_mask) ||
+        (bit_bound <= bit)) {
+      return type(-3, -3);
+    }
+
+    const uint32_t word_count = bit_bound >> bits_per_int_lg2;
+
+    // Use potentially two fetch_add to avoid a CAS loop.
+    // Could generate a "racing" failure-to-acquire
+    // when the bitset is full at the atomic_fetch_add(+1)
+    // and a release occurs before the atomic_fetch_add(-1).
+
+    const uint32_t state = (uint32_t)Kokkos::atomic_fetch_add(
+        reinterpret_cast<volatile int *>(buffer), 1);
+
+    const uint32_t state_error = state_header != (state & state_header_mask);
+
+    const uint32_t state_bit_used = state & state_used_mask;
+
+    if (state_error || (bit_bound <= state_bit_used)) {
+      Kokkos::atomic_fetch_add(reinterpret_cast<volatile int *>(buffer), -1);
+      return state_error ? type(-2, -2) : type(-1, -1);
+    }
+
+    // Do not update bit until count is visible:
+
+    Kokkos::memory_fence();
+
+    // There is a zero bit available somewhere,
+    // now find the (first) available bit and set it.
+
+    while (1) {
+      const uint32_t word = bit >> bits_per_int_lg2;
+      const uint32_t mask = 1u << (bit & bits_per_int_mask);
+      const uint32_t prev = Kokkos::atomic_fetch_or(buffer + word + 1, mask);
+
+      if (!(prev & mask)) {
+        // Successfully claimed 'result.first' by
+        // atomically setting that bit.
+        // Flush the set operation. Technically this only needs to be acquire/
+        // release semantics and not sequentially consistent, but for now
+        // we'll just do this.
+        Kokkos::memory_fence();
+        return type(bit, state_bit_used + 1);
+      }
+
+      // Failed race to set the selected bit
+      // Find a new bit to try.
+
+      const int j = Kokkos::Impl::bit_first_zero(prev);
+
+      if (0 <= j) {
+        bit = (word << bits_per_int_lg2) | uint32_t(j);
+      }
+
+      if ((j < 0) || (bit_bound <= bit)) {
+        bit = ((word + 1) < word_count ? ((word + 1) << bits_per_int_lg2) : 0) |
+              (bit & bits_per_int_mask);
+      }
+    }
+  }
+
+  /**\brief  Release a previously acquired bit.
+   *
+   *  Requires: 'bit' previously acquired and has not yet been released.
+   *
+   *  Returns:
+   *    0 <= used count after successful release
+   *    -1 bit was already released
+   *    -2 state_header error
+   */
+  KOKKOS_INLINE_FUNCTION static int release(
+      uint32_t volatile *const buffer, uint32_t const bit,
+      uint32_t const state_header = 0 /* optional header */
+      ) noexcept {
+    if (state_header != (state_header_mask & *buffer)) {
+      return -2;
+    }
+
+    const uint32_t mask = 1u << (bit & bits_per_int_mask);
+    const uint32_t prev =
+        Kokkos::atomic_fetch_and(buffer + (bit >> bits_per_int_lg2) + 1, ~mask);
+
+    if (!(prev & mask)) {
+      return -1;
+    }
+
+    // Do not update count until bit clear is visible
+    Kokkos::memory_fence();
+
+    const int count =
+        Kokkos::atomic_fetch_add(reinterpret_cast<volatile int *>(buffer), -1);
+
+    // Flush the store-release
+    Kokkos::memory_fence();
+
+    return (count & state_used_mask) - 1;
+  }
+
+  /**\brief  Set a specific bit.
+   *
+   *  Requires: Bit within bounds and not already set.
+   *
+   *  Returns:
+   *    0 <= used count after successful release
+   *    -1 bit was already released
+   *    -2 bit or state_header error
+   */
+  KOKKOS_INLINE_FUNCTION static int set(
+      uint32_t volatile *const buffer, uint32_t const bit,
+      uint32_t const state_header = 0 /* optional header */
+      ) noexcept {
+    if (state_header != (state_header_mask & *buffer)) {
+      return -2;
+    }
+
+    const uint32_t mask = 1u << (bit & bits_per_int_mask);
+    const uint32_t prev =
+        Kokkos::atomic_fetch_or(buffer + (bit >> bits_per_int_lg2) + 1, mask);
+
+    if (!(prev & mask)) {
+      return -1;
+    }
+
+    // Do not update count until the bit update is visible
+    Kokkos::memory_fence();
+
+    const int count =
+        Kokkos::atomic_fetch_add(reinterpret_cast<volatile int *>(buffer), -1);
+
+    return (count & state_used_mask) - 1;
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif /* #ifndef KOKKOS_CONCURRENTBITSET_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Core.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Core.cpp
new file mode 100644 (file)
index 0000000..f624e7a
--- /dev/null
@@ -0,0 +1,1162 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_Command_Line_Parsing.hpp>
+#include <impl/Kokkos_ParseCommandLineArgumentsAndEnvironmentVariables.hpp>
+#include <impl/Kokkos_DeviceManagement.hpp>
+#include <impl/Kokkos_ExecSpaceManager.hpp>
+
+#include <algorithm>
+#include <cctype>
+#include <cstring>
+#include <iostream>
+#include <sstream>
+#include <cstdlib>
+#include <stack>
+#include <functional>
+#include <list>
+#include <map>
+#include <cerrno>
+#include <random>
+#include <regex>
+#include <vector>
+#ifndef _WIN32
+#include <unistd.h>
+#else
+#include <windows.h>
+#endif
+
+//----------------------------------------------------------------------------
+namespace {
+bool g_is_initialized = false;
+bool g_is_finalized   = false;
+bool g_show_warnings  = true;
+bool g_tune_internals = false;
+// When compiling with clang/LLVM and using the GNU (GCC) C++ Standard Library
+// (any recent version between GCC 7.3 and GCC 9.2), std::deque SEGV's during
+// the unwinding of the atexit(3C) handlers at program termination.  However,
+// this bug is not observable when building with GCC.
+// As an added bonus, std::list<T> provides constant-time insertion and
+// deletion, which translates to better run-time performance than
+// std::deque<T>, which does not provide the same constant-time complexity
+// for inserts/removals since it is implemented as a segmented array.
+using hook_function_type = std::function<void()>;
+std::stack<hook_function_type, std::list<hook_function_type>> finalize_hooks;
+
+/**
+ * The category is only used for printing; tools
+ * receive all metadata without the category.
+ */
+using metadata_category_type = std::string;
+using metadata_key_type      = std::string;
+using metadata_value_type    = std::string;
+
+std::map<metadata_category_type,
+         std::map<metadata_key_type, metadata_value_type>>
+    metadata_map;
+
+void declare_configuration_metadata(const std::string& category,
+                                    const std::string& key,
+                                    const std::string& value) {
+  metadata_map[category][key] = value;
+}
+
+void combine(Kokkos::InitializationSettings& out,
+             Kokkos::InitializationSettings const& in) {
+#define KOKKOS_IMPL_COMBINE_SETTING(NAME) \
+  if (in.has_##NAME()) {                  \
+    out.set_##NAME(in.get_##NAME());      \
+  }                                       \
+  static_assert(true, "no-op to require trailing semicolon")
+  KOKKOS_IMPL_COMBINE_SETTING(num_threads);
+  KOKKOS_IMPL_COMBINE_SETTING(map_device_id_by);
+  KOKKOS_IMPL_COMBINE_SETTING(device_id);
+  KOKKOS_IMPL_COMBINE_SETTING(num_devices);
+  KOKKOS_IMPL_COMBINE_SETTING(skip_device);
+  KOKKOS_IMPL_COMBINE_SETTING(disable_warnings);
+  KOKKOS_IMPL_COMBINE_SETTING(tune_internals);
+  KOKKOS_IMPL_COMBINE_SETTING(tools_help);
+  KOKKOS_IMPL_COMBINE_SETTING(tools_libs);
+  KOKKOS_IMPL_COMBINE_SETTING(tools_args);
+#undef KOKKOS_IMPL_COMBINE_SETTING
+}
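+
+// For illustration: if `in` has only device_id set, combine(out, in) copies
+// device_id into `out` and leaves its other fields (e.g. num_threads)
+// untouched; only settings actually present in `in` are transferred.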
+
+void combine(Kokkos::InitializationSettings& out,
+             Kokkos::Tools::InitArguments const& in) {
+  using Kokkos::Tools::InitArguments;
+  if (in.help != InitArguments::PossiblyUnsetOption::unset) {
+    out.set_tools_help(in.help == InitArguments::PossiblyUnsetOption::on);
+  }
+  if (in.lib != InitArguments::unset_string_option) {
+    out.set_tools_libs(in.lib);
+  }
+  if (in.args != InitArguments::unset_string_option) {
+    out.set_tools_args(in.args);
+  }
+}
+
+void combine(Kokkos::Tools::InitArguments& out,
+             Kokkos::InitializationSettings const& in) {
+  using Kokkos::Tools::InitArguments;
+  if (in.has_tools_help()) {
+    out.help = in.get_tools_help() ? InitArguments::PossiblyUnsetOption::on
+                                   : InitArguments::PossiblyUnsetOption::off;
+  }
+  if (in.has_tools_libs()) {
+    out.lib = in.get_tools_libs();
+  }
+  if (in.has_tools_args()) {
+    out.args = in.get_tools_args();
+  }
+}
+
+int get_device_count() {
+#if defined(KOKKOS_ENABLE_CUDA)
+  return Kokkos::Cuda::detect_device_count();
+#elif defined(KOKKOS_ENABLE_HIP)
+  return Kokkos::Experimental::HIP::detect_device_count();
+#elif defined(KOKKOS_ENABLE_SYCL)
+  return sycl::device::get_devices(sycl::info::device_type::gpu).size();
+#elif defined(KOKKOS_ENABLE_OPENACC)
+  return acc_get_num_devices(
+      Kokkos::Experimental::Impl::OpenACC_Traits::dev_type);
+#else
+  Kokkos::abort("implementation bug");
+  return -1;
+#endif
+}
+
+unsigned get_process_id() {
+#ifdef _WIN32
+  return unsigned(GetCurrentProcessId());
+#else
+  return unsigned(getpid());
+#endif
+}
+
+bool is_valid_num_threads(int x) { return x > 0; }
+
+bool is_valid_device_id(int x) { return x >= 0; }
+
+bool is_valid_map_device_id_by(std::string const& x) {
+  return x == "mpi_rank" || x == "random";
+}
+
+}  // namespace
+
+Kokkos::Impl::ExecSpaceManager& Kokkos::Impl::ExecSpaceManager::get_instance() {
+  static ExecSpaceManager space_initializer = {};
+  return space_initializer;
+}
+
+void Kokkos::Impl::ExecSpaceManager::register_space_factory(
+    const std::string name, std::unique_ptr<ExecSpaceBase> space) {
+  exec_space_factory_list[name] = std::move(space);
+}
+
+void Kokkos::Impl::ExecSpaceManager::initialize_spaces(
+    const InitializationSettings& settings) {
+  // Note: the names of the execution spaces, used as keys in the map, encode
+  // the ordering of the initialization code from the old initialization stuff.
+  // Eventually, we may want to do something less brittle than this, but for now
+  // we're just preserving compatibility with the old implementation.
+  for (auto& to_init : exec_space_factory_list) {
+    to_init.second->initialize(settings);
+  }
+}
+
+void Kokkos::Impl::ExecSpaceManager::finalize_spaces() {
+  for (auto& to_finalize : exec_space_factory_list) {
+    to_finalize.second->finalize();
+  }
+}
+
+void Kokkos::Impl::ExecSpaceManager::static_fence(const std::string& name) {
+  for (auto& to_fence : exec_space_factory_list) {
+    to_fence.second->static_fence(name);
+  }
+}
+void Kokkos::Impl::ExecSpaceManager::print_configuration(std::ostream& os,
+                                                         bool verbose) {
+  for (auto const& to_print : exec_space_factory_list) {
+    to_print.second->print_configuration(os, verbose);
+  }
+}
+
+int Kokkos::Impl::get_ctest_gpu(const char* local_rank_str) {
+  auto const* ctest_kokkos_device_type =
+      std::getenv("CTEST_KOKKOS_DEVICE_TYPE");
+  if (!ctest_kokkos_device_type) {
+    return 0;
+  }
+
+  auto const* ctest_resource_group_count_str =
+      std::getenv("CTEST_RESOURCE_GROUP_COUNT");
+  if (!ctest_resource_group_count_str) {
+    return 0;
+  }
+
+  // Make sure rank is within bounds of resource groups specified by CTest
+  auto resource_group_count = std::stoi(ctest_resource_group_count_str);
+  auto local_rank           = std::stoi(local_rank_str);
+  if (local_rank >= resource_group_count) {
+    std::ostringstream ss;
+    ss << "Error: local rank " << local_rank
+       << " is outside the bounds of resource groups provided by CTest. Raised"
+       << " by Kokkos::Impl::get_ctest_gpu().";
+    throw_runtime_exception(ss.str());
+  }
+
+  // Get the resource types allocated to this resource group
+  std::ostringstream ctest_resource_group;
+  ctest_resource_group << "CTEST_RESOURCE_GROUP_" << local_rank;
+  std::string ctest_resource_group_name = ctest_resource_group.str();
+  auto const* ctest_resource_group_str =
+      std::getenv(ctest_resource_group_name.c_str());
+  if (!ctest_resource_group_str) {
+    std::ostringstream ss;
+    ss << "Error: " << ctest_resource_group_name << " is not specified. Raised"
+       << " by Kokkos::Impl::get_ctest_gpu().";
+    throw_runtime_exception(ss.str());
+  }
+
+  // Look for the device type specified in CTEST_KOKKOS_DEVICE_TYPE
+  bool found_device                        = false;
+  std::string ctest_resource_group_cxx_str = ctest_resource_group_str;
+  std::istringstream instream(ctest_resource_group_cxx_str);
+  while (true) {
+    std::string devName;
+    std::getline(instream, devName, ',');
+    if (devName == ctest_kokkos_device_type) {
+      found_device = true;
+      break;
+    }
+    if (instream.eof() || devName.length() == 0) {
+      break;
+    }
+  }
+
+  if (!found_device) {
+    std::ostringstream ss;
+    ss << "Error: device type '" << ctest_kokkos_device_type
+       << "' not included in " << ctest_resource_group_name
+       << ". Raised by Kokkos::Impl::get_ctest_gpu().";
+    throw_runtime_exception(ss.str());
+  }
+
+  // Get the device ID
+  std::string ctest_device_type_upper = ctest_kokkos_device_type;
+  for (auto& c : ctest_device_type_upper) {
+    c = std::toupper(c);
+  }
+  ctest_resource_group << "_" << ctest_device_type_upper;
+
+  std::string ctest_resource_group_id_name = ctest_resource_group.str();
+  auto resource_str = std::getenv(ctest_resource_group_id_name.c_str());
+  if (!resource_str) {
+    std::ostringstream ss;
+    ss << "Error: " << ctest_resource_group_id_name
+       << " is not specified. Raised by Kokkos::Impl::get_ctest_gpu().";
+    throw_runtime_exception(ss.str());
+  }
+
+  auto const* comma = std::strchr(resource_str, ',');
+  if (!comma || strncmp(resource_str, "id:", 3)) {
+    std::ostringstream ss;
+    ss << "Error: invalid value of " << ctest_resource_group_id_name << ": '"
+       << resource_str << "'. Raised by Kokkos::Impl::get_ctest_gpu().";
+    throw_runtime_exception(ss.str());
+  }
+
+  std::string id(resource_str + 3, comma - resource_str - 3);
+  return std::stoi(id.c_str());
+}
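+
+// For illustration, with a CTest resource-group environment such as
+// (hypothetical values)
+//   CTEST_KOKKOS_DEVICE_TYPE=gpus
+//   CTEST_RESOURCE_GROUP_COUNT=2
+//   CTEST_RESOURCE_GROUP_0=gpus
+//   CTEST_RESOURCE_GROUP_0_GPUS=id:2,slots:1
+// get_ctest_gpu("0") parses the "id:" prefix up to the comma and returns 2.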
+
+std::vector<int> Kokkos::Impl::get_visible_devices(
+    Kokkos::InitializationSettings const& settings, int device_count) {
+  std::vector<int> visible_devices;
+  char* env_visible_devices = std::getenv("KOKKOS_VISIBLE_DEVICES");
+  if (env_visible_devices) {
+    std::stringstream ss(env_visible_devices);
+    for (int i; ss >> i;) {
+      visible_devices.push_back(i);
+      if (ss.peek() == ',') ss.ignore();
+    }
+    for (auto id : visible_devices) {
+      if (id < 0) {
+        // Build the message in a fresh stream (not the parsing stream `ss`)
+        // and abort, consistent with the other error paths in this function.
+        std::stringstream msg;
+        msg << "Error: Invalid device id '" << id
+            << "' in environment variable 'KOKKOS_VISIBLE_DEVICES="
+            << env_visible_devices << "'."
+            << " Device id cannot be negative!"
+            << " Raised by Kokkos::initialize().\n";
+        Kokkos::abort(msg.str().c_str());
+      }
+      if (id >= device_count) {
+        std::stringstream msg;
+        msg << "Error: Invalid device id '" << id
+            << "' in environment variable 'KOKKOS_VISIBLE_DEVICES="
+            << env_visible_devices << "'."
+            << " Device id must be smaller than the number of GPUs available"
+            << " for execution '" << device_count << "'!"
+            << " Raised by Kokkos::initialize().\n";
+        Kokkos::abort(msg.str().c_str());
+      }
+    }
+  } else {
+    int num_devices =
+        settings.has_num_devices() ? settings.get_num_devices() : device_count;
+    if (num_devices > device_count) {
+      std::stringstream ss;
+      ss << "Error: Specified number of devices '" << num_devices
+         << "' exceeds the actual number of GPUs available for execution '"
+         << device_count << "'."
+         << " Raised by Kokkos::initialize().\n";
+      Kokkos::abort(ss.str().c_str());
+    }
+    for (int i = 0; i < num_devices; ++i) {
+      visible_devices.push_back(i);
+    }
+    if (settings.has_skip_device()) {
+      if (visible_devices.size() == 1 && settings.get_skip_device() == 0) {
+        Kokkos::abort(
+            "Error: skipping the only GPU available for execution.\n"
+            " Raised by Kokkos::initialize().\n");
+      }
+      visible_devices.erase(
+          std::remove(visible_devices.begin(), visible_devices.end(),
+                      settings.get_skip_device()),
+          visible_devices.end());
+    }
+  }
+  if (visible_devices.empty()) {
+    Kokkos::abort(
+        "Error: no GPU available for execution.\n"
+        " Raised by Kokkos::initialize().\n");
+  }
+  return visible_devices;
+}
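+
+// For example, with 4 GPUs detected and KOKKOS_VISIBLE_DEVICES=0,2 set, the
+// visible devices are {0, 2}. Without the variable, all detected device ids
+// are visible unless the num_devices/skip_device settings prune them.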
+
+int Kokkos::Impl::get_gpu(const InitializationSettings& settings) {
+  std::vector<int> visible_devices =
+      get_visible_devices(settings, get_device_count());
+  int const num_devices = visible_devices.size();
+  // device_id is provided
+  if (settings.has_device_id()) {
+    int const id = settings.get_device_id();
+    if (id < 0) {
+      std::stringstream ss;
+      ss << "Error: Requested GPU with invalid id '" << id << "'."
+         << " Device id cannot be negative!"
+         << " Raised by Kokkos::initialize().\n";
+      Kokkos::abort(ss.str().c_str());
+    }
+    if (id >= num_devices) {
+      std::stringstream ss;
+      ss << "Error: Requested GPU with id '" << id << "' but only "
+         << num_devices << " GPU(s) available!"
+         << " Raised by Kokkos::initialize().\n";
+      Kokkos::abort(ss.str().c_str());
+    }
+    return visible_devices[settings.get_device_id()];
+  }
+
+  // either random or round-robin assignment based on local MPI rank
+  if (settings.has_map_device_id_by() &&
+      !is_valid_map_device_id_by(settings.get_map_device_id_by())) {
+    std::stringstream ss;
+    ss << "Error: map_device_id_by setting '" << settings.get_map_device_id_by()
+       << "' is not recognized."
+       << " Raised by Kokkos::initialize().\n";
+    Kokkos::abort(ss.str().c_str());
+  }
+
+  if (settings.has_map_device_id_by() &&
+      settings.get_map_device_id_by() == "random") {
+    std::default_random_engine gen(get_process_id());
+    std::uniform_int_distribution<int> distribution(0, num_devices - 1);
+    return visible_devices[distribution(gen)];
+  }
+
+  // either map_device_id_by is not specified or it is mpi_rank
+  if (settings.has_map_device_id_by() &&
+      settings.get_map_device_id_by() != "mpi_rank") {
+    Kokkos::abort("implementation bug");
+  }
+
+  auto const* local_rank_str =
+      std::getenv("OMPI_COMM_WORLD_LOCAL_RANK");  // OpenMPI
+  if (!local_rank_str)
+    local_rank_str = std::getenv("MV2_COMM_WORLD_LOCAL_RANK");  // MVAPICH2
+  if (!local_rank_str) local_rank_str = std::getenv("SLURM_LOCALID");  // SLURM
+
+  // use first GPU available for execution if unable to detect local MPI rank
+  if (!local_rank_str) {
+    if (settings.has_map_device_id_by()) {
+      std::cerr << "Warning: unable to detect local MPI rank."
+                << " Falling back to the first GPU available for execution."
+                << " Raised by Kokkos::initialize()." << std::endl;
+    }
+    return visible_devices[0];
+  }
+
+  // use device assigned by CTest when resource allocation is activated
+  if (std::getenv("CTEST_KOKKOS_DEVICE_TYPE") &&
+      std::getenv("CTEST_RESOURCE_GROUP_COUNT")) {
+    return get_ctest_gpu(local_rank_str);
+  }
+
+  return visible_devices[std::stoi(local_rank_str) % visible_devices.size()];
+}
+
+namespace {
+
+void initialize_backends(const Kokkos::InitializationSettings& settings) {
+// This is an experimental setting
+// For KNL in Flat mode this variable should be set, so that
+// memkind allocates high bandwidth memory correctly.
+#ifdef KOKKOS_ENABLE_HBWSPACE
+  setenv("MEMKIND_HBW_NODES", "1", 0);
+#endif
+
+  Kokkos::Impl::ExecSpaceManager::get_instance().initialize_spaces(settings);
+}
+
+void initialize_profiling(const Kokkos::Tools::InitArguments& args) {
+  auto initialization_status =
+      Kokkos::Tools::Impl::initialize_tools_subsystem(args);
+  if (initialization_status.result ==
+      Kokkos::Tools::Impl::InitializationStatus::InitializationResult::
+          help_request) {
+    g_is_initialized = true;
+    ::Kokkos::finalize();
+    std::exit(EXIT_SUCCESS);
+  } else if (initialization_status.result ==
+             Kokkos::Tools::Impl::InitializationStatus::InitializationResult::
+                 success) {
+    Kokkos::Tools::parseArgs(args.args);
+    for (const auto& category_value : metadata_map) {
+      for (const auto& key_value : category_value.second) {
+        Kokkos::Tools::declareMetadata(key_value.first, key_value.second);
+      }
+    }
+  } else {
+    std::cerr << "Error initializing Kokkos Tools subsystem" << std::endl;
+    g_is_initialized = true;
+    ::Kokkos::finalize();
+    std::exit(EXIT_FAILURE);
+  }
+}
+
+std::string version_string_from_int(int version_number) {
+  std::stringstream str_builder;
+  str_builder << version_number / 10000 << "." << (version_number % 10000) / 100
+              << "." << version_number % 100;
+  return str_builder.str();
+}
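+
+// Worked example: version_string_from_int(30700) yields "3.7.0"
+// (30700 / 10000 = 3, (30700 % 10000) / 100 = 7, 30700 % 100 = 0).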
+
+void pre_initialize_internal(const Kokkos::InitializationSettings& settings) {
+  if (settings.has_disable_warnings() && settings.get_disable_warnings())
+    g_show_warnings = false;
+  if (settings.has_tune_internals() && settings.get_tune_internals())
+    g_tune_internals = true;
+  declare_configuration_metadata("version_info", "Kokkos Version",
+                                 version_string_from_int(KOKKOS_VERSION));
+#ifdef KOKKOS_COMPILER_APPLECC
+  declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_APPLECC",
+                                 std::to_string(KOKKOS_COMPILER_APPLECC));
+  declare_configuration_metadata("tools_only", "compiler_family", "apple");
+#endif
+#ifdef KOKKOS_COMPILER_CLANG
+  declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_CLANG",
+                                 std::to_string(KOKKOS_COMPILER_CLANG));
+  declare_configuration_metadata("tools_only", "compiler_family", "clang");
+#endif
+#ifdef KOKKOS_COMPILER_CRAYC
+  declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_CRAYC",
+                                 std::to_string(KOKKOS_COMPILER_CRAYC));
+  declare_configuration_metadata("tools_only", "compiler_family", "cray");
+#endif
+#ifdef KOKKOS_COMPILER_GNU
+  declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_GNU",
+                                 std::to_string(KOKKOS_COMPILER_GNU));
+  declare_configuration_metadata("tools_only", "compiler_family", "gnu");
+#endif
+#ifdef KOKKOS_COMPILER_IBM
+  declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_IBM",
+                                 std::to_string(KOKKOS_COMPILER_IBM));
+  declare_configuration_metadata("tools_only", "compiler_family", "ibm");
+#endif
+#ifdef KOKKOS_COMPILER_INTEL
+  declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_INTEL",
+                                 std::to_string(KOKKOS_COMPILER_INTEL));
+  declare_configuration_metadata("tools_only", "compiler_family", "intel");
+#endif
+#ifdef KOKKOS_COMPILER_NVCC
+  declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_NVCC",
+                                 std::to_string(KOKKOS_COMPILER_NVCC));
+  declare_configuration_metadata("tools_only", "compiler_family", "nvcc");
+#endif
+#ifdef KOKKOS_COMPILER_PGI
+  declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_PGI",
+                                 std::to_string(KOKKOS_COMPILER_PGI));
+  declare_configuration_metadata("tools_only", "compiler_family", "pgi");
+#endif
+#ifdef KOKKOS_COMPILER_MSVC
+  declare_configuration_metadata("compiler_version", "KOKKOS_COMPILER_MSVC",
+                                 std::to_string(KOKKOS_COMPILER_MSVC));
+  declare_configuration_metadata("tools_only", "compiler_family", "msvc");
+#endif
+
+#ifdef KOKKOS_ENABLE_GNU_ATOMICS
+  declare_configuration_metadata("atomics", "KOKKOS_ENABLE_GNU_ATOMICS", "yes");
+#else
+  declare_configuration_metadata("atomics", "KOKKOS_ENABLE_GNU_ATOMICS", "no");
+#endif
+#ifdef KOKKOS_ENABLE_INTEL_ATOMICS
+  declare_configuration_metadata("atomics", "KOKKOS_ENABLE_INTEL_ATOMICS",
+                                 "yes");
+#else
+  declare_configuration_metadata("atomics", "KOKKOS_ENABLE_INTEL_ATOMICS",
+                                 "no");
+#endif
+#ifdef KOKKOS_ENABLE_WINDOWS_ATOMICS
+  declare_configuration_metadata("atomics", "KOKKOS_ENABLE_WINDOWS_ATOMICS",
+                                 "yes");
+#else
+  declare_configuration_metadata("atomics", "KOKKOS_ENABLE_WINDOWS_ATOMICS",
+                                 "no");
+#endif
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+  declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_IVDEP",
+                                 "yes");
+#else
+  declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_IVDEP",
+                                 "no");
+#endif
+#ifdef KOKKOS_ENABLE_PRAGMA_LOOPCOUNT
+  declare_configuration_metadata("vectorization",
+                                 "KOKKOS_ENABLE_PRAGMA_LOOPCOUNT", "yes");
+#else
+  declare_configuration_metadata("vectorization",
+                                 "KOKKOS_ENABLE_PRAGMA_LOOPCOUNT", "no");
+#endif
+#ifdef KOKKOS_ENABLE_PRAGMA_SIMD
+  declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_SIMD",
+                                 "yes");
+#else
+  declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_SIMD",
+                                 "no");
+#endif
+#ifdef KOKKOS_ENABLE_PRAGMA_UNROLL
+  declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_UNROLL",
+                                 "yes");
+#else
+  declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_UNROLL",
+                                 "no");
+#endif
+#ifdef KOKKOS_ENABLE_PRAGMA_VECTOR
+  declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_VECTOR",
+                                 "yes");
+#else
+  declare_configuration_metadata("vectorization", "KOKKOS_ENABLE_PRAGMA_VECTOR",
+                                 "no");
+#endif
+
+#ifdef KOKKOS_ENABLE_HBWSPACE
+  declare_configuration_metadata("memory", "KOKKOS_ENABLE_HBWSPACE", "yes");
+#else
+  declare_configuration_metadata("memory", "KOKKOS_ENABLE_HBWSPACE", "no");
+#endif
+#ifdef KOKKOS_ENABLE_INTEL_MM_ALLOC
+  declare_configuration_metadata("memory", "KOKKOS_ENABLE_INTEL_MM_ALLOC",
+                                 "yes");
+#else
+  declare_configuration_metadata("memory", "KOKKOS_ENABLE_INTEL_MM_ALLOC",
+                                 "no");
+#endif
+
+#ifdef KOKKOS_ENABLE_ASM
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_ASM", "yes");
+#else
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_ASM", "no");
+#endif
+#ifdef KOKKOS_ENABLE_CXX14
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX14", "yes");
+#else
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX14", "no");
+#endif
+#ifdef KOKKOS_ENABLE_CXX17
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX17", "yes");
+#else
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX17", "no");
+#endif
+#ifdef KOKKOS_ENABLE_CXX20
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX20", "yes");
+#else
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_CXX20", "no");
+#endif
+#ifdef KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK",
+                                 "yes");
+#else
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK",
+                                 "no");
+#endif
+#ifdef KOKKOS_ENABLE_HWLOC
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_HWLOC", "yes");
+#else
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_HWLOC", "no");
+#endif
+#ifdef KOKKOS_ENABLE_LIBRT
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_LIBRT", "yes");
+#else
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_LIBRT", "no");
+#endif
+#ifdef KOKKOS_ENABLE_LIBDL
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_LIBDL", "yes");
+#else
+  declare_configuration_metadata("options", "KOKKOS_ENABLE_LIBDL", "no");
+#endif
+  declare_configuration_metadata("architecture", "Default Device",
+                                 typeid(Kokkos::DefaultExecutionSpace).name());
+}
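// Sketch (editorial, not part of the bundled file): declare_configuration_metadata()
// is defined elsewhere in this translation unit; judging from how
// print_configuration() below consumes metadata_map, a plausible shape is:
//
//   std::map<std::string, std::map<std::string, std::string>> metadata_map;
//
//   void declare_configuration_metadata(const std::string& category,
//                                       const std::string& key,
//                                       const std::string& value) {
//     metadata_map[category][key] = value;
//   }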
+
+void post_initialize_internal(const Kokkos::InitializationSettings& settings) {
+  Kokkos::Tools::InitArguments tools_init_arguments;
+  combine(tools_init_arguments, settings);
+  initialize_profiling(tools_init_arguments);
+  g_is_initialized = true;
+  if (settings.has_print_configuration() &&
+      settings.get_print_configuration()) {
+    ::Kokkos::print_configuration(std::cout);
+  }
+}
+
+void initialize_internal(const Kokkos::InitializationSettings& settings) {
+  pre_initialize_internal(settings);
+  initialize_backends(settings);
+  post_initialize_internal(settings);
+}
+
+void finalize_internal() {
+  typename decltype(finalize_hooks)::size_type numSuccessfulCalls = 0;
+  while (!finalize_hooks.empty()) {
+    auto f = finalize_hooks.top();
+    try {
+      f();
+    } catch (...) {
+      std::cerr << "Kokkos::finalize: A finalize hook (set via "
+                   "Kokkos::push_finalize_hook) threw an exception that it did "
+                   "not catch."
+                   "  Per std::atexit rules, this results in std::terminate.  "
+                   "This is "
+                   "finalize hook number "
+                << numSuccessfulCalls
+                << " (1-based indexing) "
+                   "out of "
+                << finalize_hooks.size()
+                << " to call.  Remember that "
+                   "Kokkos::finalize calls finalize hooks in reverse order "
+                   "from how they "
+                   "were pushed."
+                << std::endl;
+      std::terminate();
+    }
+    finalize_hooks.pop();
+    ++numSuccessfulCalls;
+  }
+
+  Kokkos::Profiling::finalize();
+
+  Kokkos::Impl::ExecSpaceManager::get_instance().finalize_spaces();
+
+  g_is_initialized = false;
+  g_is_finalized   = true;
+  g_show_warnings  = true;
+  g_tune_internals = false;
+}
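// Usage sketch (editorial): hooks pushed via Kokkos::push_finalize_hook() run
// in LIFO order during Kokkos::finalize(), as the stack-based loop above
// implements; assumes <cstdio> and Kokkos_Core.hpp are included.
void example_finalize_hooks() {
  Kokkos::push_finalize_hook([] { std::puts("pushed first, runs last"); });
  Kokkos::push_finalize_hook([] { std::puts("pushed last, runs first"); });
  Kokkos::finalize();  // runs the second hook, then the first
}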
+
+void fence_internal(const std::string& name) {
+  Kokkos::Impl::ExecSpaceManager::get_instance().static_fence(name);
+}
+
+void print_help_message() {
+  auto const help_message = R"(
+--------------------------------------------------------------------------------
+-------------Kokkos command line arguments--------------------------------------
+--------------------------------------------------------------------------------
+This program is using Kokkos.  You can use the following command line flags to
+control its behavior:
+
+Kokkos Core Options:
+  --kokkos-help                  : print this message
+  --kokkos-disable-warnings      : disable kokkos warning messages
+  --kokkos-print-configuration   : print configuration
+  --kokkos-tune-internals        : allow Kokkos to autotune policies and declare
+                                   tuning features through the tuning system. If
+                                   left off, Kokkos uses heuristics
+  --kokkos-num-threads=INT       : specify total number of threads to use for
+                                   parallel regions on the host.
+  --kokkos-device-id=INT         : specify device id to be used by Kokkos.
+  --kokkos-map-device-id-by=(random|mpi_rank)
+                                 : strategy to select the device-id automatically from
+                                   the available devices.
+                                   - random:   choose a random device from those available.
+                                   - mpi_rank: choose the device-id based on a round-robin
+                                               assignment of local MPI ranks.
+                                               Works with OpenMPI, MVAPICH, SLURM, and
+                                               derived implementations.
+
+Kokkos Tools Options:
+  --kokkos-tools-libs=STR        : Specify which tool library to use. Must be
+                                   either the full path to the library, or the
+                                   library name if its directory is in the runtime
+                                   library search path (e.g. LD_LIBRARY_PATH)
+  --kokkos-tools-help            : Query the (loaded) kokkos-tool for its command-line
+                                   option support (which should then be passed via
+                                   --kokkos-tools-args="...")
+  --kokkos-tools-args=STR        : A single (quoted) string of options which will be
+                                   whitespace delimited and passed to the loaded
+                                   kokkos-tool as command-line arguments. E.g.
+                                   `<EXE> --kokkos-tools-args="-c input.txt"` will
+                                   pass `<EXE> -c input.txt` as argc/argv to tool
+
+Except for --kokkos[-tools]-help, each flag can alternatively be set through the
+corresponding environment variable (all letters in upper case, with underscores
+instead of hyphens). For example, to disable warning messages, you can either
+specify --kokkos-disable-warnings or set the KOKKOS_DISABLE_WARNINGS
+environment variable to yes.
+
+Join us on Slack, visit https://kokkosteam.slack.com
+Report bugs to https://github.com/kokkos/kokkos/issues
+--------------------------------------------------------------------------------
+)";
+  std::cout << help_message << std::endl;
+}
+
+}  // namespace
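// Usage sketch (editorial): Kokkos::initialize() consumes the recognized
// --kokkos-* flags listed above and strips them from argv (see
// parse_command_line_arguments() below), so the application afterwards sees
// only its own arguments.
int example_main(int argc, char* argv[]) {
  // e.g. invoked as: ./app --kokkos-num-threads=8 --kokkos-device-id=0 input.txt
  Kokkos::initialize(argc, argv);  // argv now holds only "./app input.txt"
  // ... application work ...
  Kokkos::finalize();
  return 0;
}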
+
+void Kokkos::Impl::parse_command_line_arguments(
+    int& argc, char* argv[], InitializationSettings& settings) {
+  Tools::InitArguments tools_init_arguments;
+  combine(tools_init_arguments, settings);
+  Tools::Impl::parse_command_line_arguments(argc, argv, tools_init_arguments);
+  combine(settings, tools_init_arguments);
+
+  int num_threads;
+  int device_id;
+  int num_devices;  // deprecated
+  int skip_device;  // deprecated
+  std::string map_device_id_by;
+  bool disable_warnings;
+  bool print_configuration;
+  bool tune_internals;
+
+  auto get_flag = [](std::string s) -> std::string {
+    auto const pos = s.find('=');
+    // Guard against flags given without '=': find() returns npos there, and
+    // erase(npos) would throw std::out_of_range.
+    return pos == std::string::npos ? s : s.erase(pos);
+  };
+
+  bool help_flag = false;
+
+  int iarg = 0;
+  while (iarg < argc) {
+    bool remove_flag = false;
+
+    if (check_arg(argv[iarg], "--kokkos-numa") ||
+        check_arg(argv[iarg], "--numa")) {
+      warn_deprecated_command_line_argument(get_flag(argv[iarg]));
+      // remove flag if prefixed with '--kokkos-'
+      remove_flag = std::string(argv[iarg]).find("--kokkos-") == 0;
+    } else if (check_arg_int(argv[iarg], "--kokkos-num-threads", num_threads) ||
+               check_arg_int(argv[iarg], "--num-threads", num_threads) ||
+               check_arg_int(argv[iarg], "--kokkos-threads", num_threads) ||
+               check_arg_int(argv[iarg], "--threads", num_threads)) {
+      if (get_flag(argv[iarg]) != "--kokkos-num-threads") {
+        warn_deprecated_command_line_argument(get_flag(argv[iarg]),
+                                              "--kokkos-num-threads");
+      }
+      if (!is_valid_num_threads(num_threads)) {
+        std::stringstream ss;
+        ss << "Error: command line argument '" << argv[iarg] << "' is invalid."
+           << " The number of threads must be greater than or equal to one."
+           << " Raised by Kokkos::initialize().\n";
+        Kokkos::abort(ss.str().c_str());
+      }
+      settings.set_num_threads(num_threads);
+      remove_flag = std::string(argv[iarg]).find("--kokkos-") == 0;
+    } else if (check_arg_int(argv[iarg], "--kokkos-device-id", device_id) ||
+               check_arg_int(argv[iarg], "--device-id", device_id) ||
+               check_arg_int(argv[iarg], "--kokkos-device", device_id) ||
+               check_arg_int(argv[iarg], "--device", device_id)) {
+      if (get_flag(argv[iarg]) != "--kokkos-device-id") {
+        warn_deprecated_command_line_argument(get_flag(argv[iarg]),
+                                              "--kokkos-device-id");
+      }
+      if (!is_valid_device_id(device_id)) {
+        std::stringstream ss;
+        ss << "Error: command line argument '" << argv[iarg] << "' is invalid."
+           << " The device id must be greater than or equal to zero."
+           << " Raised by Kokkos::initialize().\n";
+        Kokkos::abort(ss.str().c_str());
+      }
+      settings.set_device_id(device_id);
+      remove_flag = std::string(argv[iarg]).find("--kokkos-") == 0;
+    } else if (check_arg(argv[iarg], "--kokkos-num-devices") ||
+               check_arg(argv[iarg], "--num-devices") ||
+               check_arg(argv[iarg], "--kokkos-ndevices") ||
+               check_arg(argv[iarg], "--ndevices")) {
+      if (check_arg(argv[iarg], "--num-devices")) {
+        warn_deprecated_command_line_argument("--num-devices",
+                                              "--kokkos-num-devices");
+      }
+      if (check_arg(argv[iarg], "--ndevices")) {
+        warn_deprecated_command_line_argument("--ndevices",
+                                              "--kokkos-num-devices");
+      }
+      if (check_arg(argv[iarg], "--kokkos-ndevices")) {
+        warn_deprecated_command_line_argument("--kokkos-ndevices",
+                                              "--kokkos-num-devices");
+      }
+      warn_deprecated_command_line_argument(
+          "--kokkos-num-devices", "--kokkos-map-device-id-by=mpi_rank");
+      // Find the number of devices (expecting '=INT[,INT]' after the flag)
+      if (!((strncmp(argv[iarg], "--kokkos-num-devices=", 21) == 0) ||
+            (strncmp(argv[iarg], "--num-devices=", 14) == 0) ||
+            (strncmp(argv[iarg], "--kokkos-ndevices=", 18) == 0) ||
+            (strncmp(argv[iarg], "--ndevices=", 11) == 0)))
+        throw_runtime_exception(
+            "Error: expecting an '=INT[,INT]' after command line argument "
+            "'--kokkos-num-devices'."
+            " Raised by Kokkos::initialize().");
+
+      char* num1      = strchr(argv[iarg], '=') + 1;
+      char* num2      = strpbrk(num1, ",");
+      int num1_len    = num2 == nullptr ? strlen(num1) : num2 - num1;
+      char* num1_only = new char[num1_len + 1];
+      strncpy(num1_only, num1, num1_len);
+      num1_only[num1_len] = '\0';
+
+      if (!is_unsigned_int(num1_only) || (strlen(num1_only) == 0)) {
+        throw_runtime_exception(
+            "Error: expecting an integer number after command line argument "
+            "'--kokkos-num-devices'."
+            " Raised by Kokkos::initialize().");
+      }
+      if (check_arg(argv[iarg], "--kokkos-num-devices") ||
+          check_arg(argv[iarg], "--kokkos-ndevices")) {
+        num_devices = std::stoi(num1_only);
+        settings.set_num_devices(num_devices);
+        settings.set_map_device_id_by("mpi_rank");
+      }
+      delete[] num1_only;
+
+      if (num2 != nullptr) {
+        if ((!is_unsigned_int(num2 + 1)) || (strlen(num2) == 1))
+          throw_runtime_exception(
+              "Error: expecting an integer number after command line argument "
+              "'--kokkos-num-devices=XX,'."
+              " Raised by Kokkos::initialize().");
+
+        if (check_arg(argv[iarg], "--kokkos-num-devices") ||
+            check_arg(argv[iarg], "--kokkos-ndevices")) {
+          skip_device = std::stoi(num2 + 1);
+          settings.set_skip_device(skip_device);
+        }
+      }
+      remove_flag = std::string(argv[iarg]).find("--kokkos-") == 0;
+    } else if (check_arg_bool(argv[iarg], "--kokkos-disable-warnings",
+                              disable_warnings)) {
+      settings.set_disable_warnings(disable_warnings);
+      remove_flag = true;
+    } else if (check_arg_bool(argv[iarg], "--kokkos-print-configuration",
+                              print_configuration)) {
+      settings.set_print_configuration(print_configuration);
+      remove_flag = true;
+    } else if (check_arg_bool(argv[iarg], "--kokkos-tune-internals",
+                              tune_internals)) {
+      settings.set_tune_internals(tune_internals);
+      remove_flag = true;
+    } else if (check_arg(argv[iarg], "--kokkos-help") ||
+               check_arg(argv[iarg], "--help")) {
+      help_flag   = true;
+      remove_flag = std::string(argv[iarg]).find("--kokkos-") == 0;
+    } else if (check_arg_str(argv[iarg], "--kokkos-map-device-id-by",
+                             map_device_id_by)) {
+      if (!is_valid_map_device_id_by(map_device_id_by)) {
+        std::stringstream ss;
+        ss << "Warning: command line argument '--kokkos-map-device-id-by="
+           << map_device_id_by << "' is not recognized."
+           << " Raised by Kokkos::initialize().\n";
+        Kokkos::abort(ss.str().c_str());
+      }
+      settings.set_map_device_id_by(map_device_id_by);
+      remove_flag = true;
+    } else if (std::regex_match(argv[iarg],
+                                std::regex("-?-kokkos.*", std::regex::egrep))) {
+      warn_not_recognized_command_line_argument(argv[iarg]);
+    }
+
+    if (remove_flag) {
+      // Shift the remainder of the argv list by one.  Note that argv has
+      // (argc + 1) entries, the last one always being nullptr.  The following
+      // loop moves that trailing nullptr as well.
+      for (int k = iarg; k < argc; ++k) {
+        argv[k] = argv[k + 1];
+      }
+      argc--;
+    } else {
+      iarg++;
+    }
+  }
+
+  if (help_flag) {
+    print_help_message();
+  }
+
+  if ((tools_init_arguments.args ==
+       Kokkos::Tools::InitArguments::unset_string_option) &&
+      argc > 0) {
+    settings.set_tools_args(argv[0]);
+  }
+}
+
+void Kokkos::Impl::parse_environment_variables(
+    InitializationSettings& settings) {
+  Tools::InitArguments tools_init_arguments;
+  combine(tools_init_arguments, settings);
+  auto init_result =
+      Tools::Impl::parse_environment_variables(tools_init_arguments);
+  if (init_result.result ==
+      Tools::Impl::InitializationStatus::environment_argument_mismatch) {
+    Impl::throw_runtime_exception(init_result.error_message);
+  }
+  combine(settings, tools_init_arguments);
+
+  if (std::getenv("KOKKOS_NUMA")) {
+    warn_deprecated_environment_variable("KOKKOS_NUMA");
+  }
+  int num_threads;
+  if (check_env_int("KOKKOS_NUM_THREADS", num_threads)) {
+    if (!is_valid_num_threads(num_threads)) {
+      std::stringstream ss;
+      ss << "Error: environment variable 'KOKKOS_NUM_THREADS=" << num_threads
+         << "' is invalid."
+         << " The number of threads must be greater than or equal to one."
+         << " Raised by Kokkos::initialize().\n";
+      Kokkos::abort(ss.str().c_str());
+    }
+    settings.set_num_threads(num_threads);
+  }
+  int device_id;
+  if (check_env_int("KOKKOS_DEVICE_ID", device_id)) {
+    if (!is_valid_device_id(device_id)) {
+      std::stringstream ss;
+      ss << "Error: environment variable 'KOKKOS_DEVICE_ID" << device_id
+         << "' is invalid."
+         << " The device id must be greater than or equal to zero."
+         << " Raised by Kokkos::initialize().\n";
+      Kokkos::abort(ss.str().c_str());
+    }
+    settings.set_device_id(device_id);
+  }
+  int num_devices;
+  int rand_devices;
+  bool has_num_devices  = check_env_int("KOKKOS_NUM_DEVICES", num_devices);
+  bool has_rand_devices = check_env_int("KOKKOS_RAND_DEVICES", rand_devices);
+  if (has_rand_devices && has_num_devices) {
+    Impl::throw_runtime_exception(
+        "Error: cannot specify both KOKKOS_NUM_DEVICES and "
+        "KOKKOS_RAND_DEVICES."
+        " Raised by Kokkos::initialize().");
+  }
+  if (has_num_devices) {
+    warn_deprecated_environment_variable("KOKKOS_NUM_DEVICES",
+                                         "KOKKOS_MAP_DEVICE_ID_BY=mpi_rank");
+    settings.set_map_device_id_by("mpi_rank");
+    settings.set_num_devices(num_devices);
+  }
+  if (has_rand_devices) {
+    warn_deprecated_environment_variable("KOKKOS_RAND_DEVICES",
+                                         "KOKKOS_MAP_DEVICE_ID_BY=random");
+    settings.set_map_device_id_by("random");
+    settings.set_num_devices(rand_devices);
+  }
+  if (has_num_devices || has_rand_devices) {
+    int skip_device;
+    if (check_env_int("KOKKOS_SKIP_DEVICE", skip_device)) {
+      settings.set_skip_device(skip_device);
+    }
+  }
+  bool disable_warnings;
+  if (check_env_bool("KOKKOS_DISABLE_WARNINGS", disable_warnings)) {
+    settings.set_disable_warnings(disable_warnings);
+  }
+  bool print_configuration;
+  if (check_env_bool("KOKKOS_PRINT_CONFIGURATION", print_configuration)) {
+    settings.set_print_configuration(print_configuration);
+  }
+  bool tune_internals;
+  if (check_env_bool("KOKKOS_TUNE_INTERNALS", tune_internals)) {
+    settings.set_tune_internals(tune_internals);
+  }
+  char const* map_device_id_by = std::getenv("KOKKOS_MAP_DEVICE_ID_BY");
+  if (map_device_id_by != nullptr) {
+    if (std::getenv("KOKKOS_DEVICE_ID")) {
+      std::cerr << "Warning: environment variable KOKKOS_MAP_DEVICE_ID_BY"
+                << "ignored since KOKKOS_DEVICE_ID is specified."
+                << " Raised by Kokkos::initialize()." << std::endl;
+    }
+    if (!is_valid_map_device_id_by(map_device_id_by)) {
+      std::stringstream ss;
+      ss << "Warning: environment variable 'KOKKOS_MAP_DEVICE_ID_BY="
+         << map_device_id_by << "' is not recognized."
+         << " Raised by Kokkos::initialize().\n";
+      Kokkos::abort(ss.str().c_str());
+    }
+    settings.set_map_device_id_by(map_device_id_by);
+  }
+}
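// Usage sketch (editorial): the same settings can be supplied through the
// environment, which Kokkos::initialize() parses (via the function above)
// before the command line. setenv() is POSIX, from <cstdlib>, and is used
// here purely for illustration.
void example_env_configuration(int argc, char* argv[]) {
  setenv("KOKKOS_NUM_THREADS", "8", /*overwrite=*/1);
  setenv("KOKKOS_DISABLE_WARNINGS", "yes", 1);
  Kokkos::initialize(argc, argv);  // picks up both variables
}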
+
+//----------------------------------------------------------------------------
+
+void Kokkos::initialize(int& argc, char* argv[]) {
+  InitializationSettings settings;
+  Impl::parse_environment_variables(settings);
+  Impl::parse_command_line_arguments(argc, argv, settings);
+  initialize_internal(settings);
+}
+
+void Kokkos::initialize(InitializationSettings const& settings) {
+  InitializationSettings tmp;
+  Impl::parse_environment_variables(tmp);
+  combine(tmp, settings);
+  initialize_internal(tmp);
+}
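// Usage sketch (editorial) for the settings-based overload above; the set_*
// members used throughout this file suggest a builder-style interface, and
// chaining is assumed here.
void example_settings_initialize() {
  Kokkos::initialize(Kokkos::InitializationSettings()
                         .set_num_threads(8)
                         .set_device_id(0)
                         .set_disable_warnings(true));
}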
+
+void Kokkos::Impl::pre_initialize(const InitializationSettings& settings) {
+  pre_initialize_internal(settings);
+}
+
+void Kokkos::Impl::post_initialize(const InitializationSettings& settings) {
+  post_initialize_internal(settings);
+}
+
+void Kokkos::push_finalize_hook(std::function<void()> f) {
+  finalize_hooks.push(f);
+}
+
+void Kokkos::finalize() { finalize_internal(); }
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+KOKKOS_DEPRECATED void Kokkos::finalize_all() { finalize_internal(); }
+#endif
+
+#ifdef KOKKOS_COMPILER_INTEL
+void Kokkos::fence() { fence("Kokkos::fence: Unnamed Global Fence"); }
+#endif
+void Kokkos::fence(const std::string& name) { fence_internal(name); }
+
+namespace {
+void print_helper(std::ostream& os,
+                  const std::map<std::string, std::string>& print_me) {
+  for (const auto& kv : print_me) {
+    os << kv.first << ": " << kv.second << '\n';
+  }
+}
+}  // namespace
+
+void Kokkos::print_configuration(std::ostream& os, bool verbose) {
+  print_helper(os, metadata_map["version_info"]);
+
+  os << "Compiler:\n";
+  print_helper(os, metadata_map["compiler_version"]);
+
+  os << "Architecture:\n";
+  print_helper(os, metadata_map["architecture"]);
+
+  os << "Atomics:\n";
+  print_helper(os, metadata_map["atomics"]);
+
+  os << "Vectorization:\n";
+  print_helper(os, metadata_map["vectorization"]);
+
+  os << "Memory:\n";
+  print_helper(os, metadata_map["memory"]);
+
+  os << "Options:\n";
+  print_helper(os, metadata_map["options"]);
+
+  Impl::ExecSpaceManager::get_instance().print_configuration(os, verbose);
+}
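// Usage sketch (editorial): the section headings printed above correspond
// one-to-one to the category strings passed to declare_configuration_metadata()
// earlier in this file; assumes <iostream>.
void example_print_configuration() {
  Kokkos::print_configuration(std::cout, /*verbose=*/true);
}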
+
+KOKKOS_ATTRIBUTE_NODISCARD bool Kokkos::is_initialized() noexcept {
+  return g_is_initialized;
+}
+
+KOKKOS_ATTRIBUTE_NODISCARD bool Kokkos::is_finalized() noexcept {
+  return g_is_finalized;
+}
+
+bool Kokkos::show_warnings() noexcept { return g_show_warnings; }
+
+bool Kokkos::tune_internals() noexcept { return g_tune_internals; }
+
+namespace Kokkos {
+
+#ifdef KOKKOS_COMPILER_PGI
+namespace Impl {
+// Bizarrely, an extra jump instruction forces the PGI compiler to avoid a
+// bug related to (probably?) empty base optimization and/or aggregate
+// construction.
+void _kokkos_pgi_compiler_bug_workaround() {}
+}  // end namespace Impl
+#endif
+}  // namespace Kokkos
+
+Kokkos::Impl::InitializationSettingsHelper<std::string>::storage_type const
+    Kokkos::Impl::InitializationSettingsHelper<std::string>::unspecified =
+        "some string we don't expect user would ever provide";
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Default_GraphNodeKernel.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Default_GraphNodeKernel.hpp
new file mode 100644 (file)
index 0000000..1edcca8
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_HOST_GRAPHNODEKERNEL_HPP
+#define KOKKOS_KOKKOS_HOST_GRAPHNODEKERNEL_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <impl/Kokkos_Default_Graph_fwd.hpp>
+
+#include <Kokkos_Graph.hpp>
+#include <Kokkos_Parallel.hpp>
+#include <Kokkos_Parallel_Reduce.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="GraphNodeKernelImpl"> {{{1
+
+template <class ExecutionSpace>
+struct GraphNodeKernelDefaultImpl {
+  // TODO @graphs decide if this should use vtable or intrusive erasure via
+  //      function pointers like in the rest of the graph interface
+  virtual void execute_kernel() = 0;
+};
+
+// TODO Indicate that this kernel specialization is only for the Host somehow?
+template <class ExecutionSpace, class PolicyType, class Functor,
+          class PatternTag, class... Args>
+class GraphNodeKernelImpl
+    : public PatternImplSpecializationFromTag<PatternTag, Functor, PolicyType,
+                                              Args..., ExecutionSpace>::type,
+      public GraphNodeKernelDefaultImpl<ExecutionSpace> {
+ public:
+  using base_t =
+      typename PatternImplSpecializationFromTag<PatternTag, Functor, PolicyType,
+                                                Args..., ExecutionSpace>::type;
+  using execute_kernel_vtable_base_t =
+      GraphNodeKernelDefaultImpl<ExecutionSpace>;
+  // We have to use this name here because that's how it was done way back
+  // when the implementations of Impl::Parallel*<> were written
+  using Policy       = PolicyType;
+  using graph_kernel = GraphNodeKernelImpl;
+
+  // TODO @graph kernel name info propagation
+  template <class PolicyDeduced, class... ArgsDeduced>
+  GraphNodeKernelImpl(std::string const&, ExecutionSpace const&,
+                      Functor arg_functor, PolicyDeduced&& arg_policy,
+                      ArgsDeduced&&... args)
+      : base_t(std::move(arg_functor), (PolicyDeduced &&) arg_policy,
+               (ArgsDeduced &&) args...),
+        execute_kernel_vtable_base_t() {}
+
+  // FIXME @graph Forward through the instance once that works in the backends
+  template <class PolicyDeduced, class... ArgsDeduced>
+  GraphNodeKernelImpl(ExecutionSpace const& ex, Functor arg_functor,
+                      PolicyDeduced&& arg_policy, ArgsDeduced&&... args)
+      : GraphNodeKernelImpl("", ex, std::move(arg_functor),
+                            (PolicyDeduced &&) arg_policy,
+                            (ArgsDeduced &&) args...) {}
+
+  void execute_kernel() final { this->base_t::execute(); }
+};
+
+// </editor-fold> end GraphNodeKernelImpl }}}1
+//==============================================================================
+
+template <class ExecutionSpace>
+struct GraphNodeAggregateKernelDefaultImpl
+    : GraphNodeKernelDefaultImpl<ExecutionSpace> {
+  // Aggregates don't need a policy, but we define a trivial one so that the
+  // static assertions about graph kernels are still satisfied.
+  struct Policy {
+    using is_graph_kernel = std::true_type;
+  };
+  using graph_kernel = GraphNodeAggregateKernelDefaultImpl;
+  void execute_kernel() final {}
+};
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_HOST_GRAPHNODEKERNEL_HPP
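The wrapper above is what the default (host) backend uses to type-erase kernels
launched through the public graph interface. A minimal usage sketch, assuming
the Kokkos::Experimental graph API shipped with this release (create_graph,
then_parallel_for, submit):

#include <Kokkos_Core.hpp>
#include <Kokkos_Graph.hpp>

void example_graph() {
  auto graph = Kokkos::Experimental::create_graph([&](auto root) {
    // Each then_parallel_for() is backed by a GraphNodeKernelImpl on the host.
    auto n1 = root.then_parallel_for(16, KOKKOS_LAMBDA(int) { /* ... */ });
    auto n2 = n1.then_parallel_for(16, KOKKOS_LAMBDA(int) { /* ... */ });
    (void)n2;
  });
  graph.submit();  // runs n1, then n2
}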
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Default_GraphNode_Impl.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Default_GraphNode_Impl.hpp
new file mode 100644 (file)
index 0000000..3704153
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_HOST_GRAPHNODE_IMPL_HPP
+#define KOKKOS_KOKKOS_HOST_GRAPHNODE_IMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <impl/Kokkos_Default_Graph_fwd.hpp>
+
+#include <Kokkos_Graph.hpp>
+
+#include <vector>
+#include <memory>
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="GraphNodeBackendSpecificDetails"> {{{1
+
+template <class ExecutionSpace>
+struct GraphNodeBackendSpecificDetails {
+ private:
+  using execution_space_instance_storage_t =
+      ExecutionSpaceInstanceStorage<ExecutionSpace>;
+  using default_kernel_impl_t = GraphNodeKernelDefaultImpl<ExecutionSpace>;
+  using default_aggregate_kernel_impl_t =
+      GraphNodeAggregateKernelDefaultImpl<ExecutionSpace>;
+
+  std::vector<std::shared_ptr<GraphNodeBackendSpecificDetails<ExecutionSpace>>>
+      m_predecessors = {};
+
+  Kokkos::ObservingRawPtr<default_kernel_impl_t> m_kernel_ptr = nullptr;
+
+  bool m_has_executed = false;
+  bool m_is_aggregate = false;
+  bool m_is_root      = false;
+
+  template <class>
+  friend struct HostGraphImpl;
+
+ protected:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Ctors, destructor, and assignment"> {{{2
+
+  explicit GraphNodeBackendSpecificDetails() = default;
+
+  explicit GraphNodeBackendSpecificDetails(
+      _graph_node_is_root_ctor_tag) noexcept
+      : m_has_executed(true), m_is_root(true) {}
+
+  GraphNodeBackendSpecificDetails(GraphNodeBackendSpecificDetails const&) =
+      delete;
+
+  GraphNodeBackendSpecificDetails(GraphNodeBackendSpecificDetails&&) noexcept =
+      delete;
+
+  GraphNodeBackendSpecificDetails& operator=(
+      GraphNodeBackendSpecificDetails const&) = delete;
+
+  GraphNodeBackendSpecificDetails& operator=(
+      GraphNodeBackendSpecificDetails&&) noexcept = delete;
+
+  ~GraphNodeBackendSpecificDetails() = default;
+
+  // </editor-fold> end Ctors, destructor, and assignment }}}2
+  //----------------------------------------------------------------------------
+
+ public:
+  void set_kernel(default_kernel_impl_t& arg_kernel) {
+    KOKKOS_EXPECTS(m_kernel_ptr == nullptr)
+    m_kernel_ptr = &arg_kernel;
+  }
+
+  void set_kernel(default_aggregate_kernel_impl_t& arg_kernel) {
+    KOKKOS_EXPECTS(m_kernel_ptr == nullptr)
+    m_kernel_ptr   = &arg_kernel;
+    m_is_aggregate = true;
+  }
+
+  void set_predecessor(
+      std::shared_ptr<GraphNodeBackendSpecificDetails<ExecutionSpace>>
+          arg_pred_impl) {
+    // This method delegates responsibility for executing the predecessor to
+    // this node.  Each node can have at most one predecessor (which may be an
+    // aggregate).
+    KOKKOS_EXPECTS(m_predecessors.empty() || m_is_aggregate)
+    KOKKOS_EXPECTS(bool(arg_pred_impl))
+    KOKKOS_EXPECTS(!m_has_executed)
+    m_predecessors.push_back(std::move(arg_pred_impl));
+  }
+
+  void execute_node() {
+    // This node could already have been executed as the predecessor of some
+    // other node.
+    KOKKOS_EXPECTS(bool(m_kernel_ptr) || m_has_executed)
+    // Just execute the predecessor here, since calling set_predecessor()
+    // delegates the responsibility for running it to us.
+    if (!m_has_executed) {
+      // I'm pretty sure this doesn't need to be atomic under our current
+      // supported semantics, but instinct suggests it should be...
+      m_has_executed = true;
+      for (auto const& predecessor : m_predecessors) {
+        predecessor->execute_node();
+      }
+      m_kernel_ptr->execute_kernel();
+    }
+    KOKKOS_ENSURES(m_has_executed)
+  }
+
+  // This is gross, but for the purposes of our simple default implementation...
+  void reset_has_executed() {
+    for (auto const& predecessor : m_predecessors) {
+      predecessor->reset_has_executed();
+    }
+    // more readable, probably:
+    //   if(!m_is_root) m_has_executed = false;
+    m_has_executed = m_is_root;
+  }
+};
+
+// </editor-fold> end GraphNodeBackendSpecificDetails }}}1
+//==============================================================================
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_HOST_GRAPHNODE_IMPL_HPP
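The set_predecessor()/execute_node() pair above implements a pull-style
traversal: each node owns responsibility for running its predecessors first,
and m_has_executed keeps a node that is reachable from several sinks from
running twice. A stripped-down, Kokkos-free illustration of the same scheme:

#include <memory>
#include <vector>

struct Node {
  std::vector<std::shared_ptr<Node>> predecessors;
  bool has_executed = false;

  void execute() {
    if (has_executed) return;  // may already have run via another sink
    has_executed = true;       // mark first, mirroring execute_node() above
    for (auto const& p : predecessors) p->execute();  // dependencies first
    // ... launch this node's kernel here ...
  }
};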
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Default_Graph_Impl.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Default_Graph_Impl.hpp
new file mode 100644 (file)
index 0000000..e4667ca
--- /dev/null
@@ -0,0 +1,197 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HOST_GRAPH_IMPL_HPP
+#define KOKKOS_HOST_GRAPH_IMPL_HPP
+
+#include <Kokkos_ExecPolicy.hpp>
+#include <Kokkos_Graph.hpp>
+
+#include <impl/Kokkos_GraphImpl_fwd.hpp>
+#include <impl/Kokkos_Default_Graph_fwd.hpp>
+
+#include <Kokkos_Serial.hpp>
+#include <Kokkos_OpenMP.hpp>
+// FIXME @graph other backends?
+
+#include <impl/Kokkos_OptionalRef.hpp>
+#include <impl/Kokkos_EBO.hpp>
+
+#include <set>
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="GraphImpl default implementation"> {{{1
+
+template <class ExecutionSpace>
+struct GraphImpl : private ExecutionSpaceInstanceStorage<ExecutionSpace> {
+ public:
+  using root_node_impl_t =
+      GraphNodeImpl<ExecutionSpace, Kokkos::Experimental::TypeErasedTag,
+                    Kokkos::Experimental::TypeErasedTag>;
+
+ private:
+  using execution_space_instance_storage_base_t =
+      ExecutionSpaceInstanceStorage<ExecutionSpace>;
+
+  using node_details_t = GraphNodeBackendSpecificDetails<ExecutionSpace>;
+  std::set<std::shared_ptr<node_details_t>> m_sinks;
+
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Constructors, destructor, and assignment"> {{{2
+
+  // Not movable or copyable; it spends its whole life as a shared_ptr in the
+  // Graph object
+  GraphImpl()                 = default;
+  GraphImpl(GraphImpl const&) = delete;
+  GraphImpl(GraphImpl&&)      = delete;
+  GraphImpl& operator=(GraphImpl const&) = delete;
+  GraphImpl& operator=(GraphImpl&&) = delete;
+  ~GraphImpl()                      = default;
+
+  explicit GraphImpl(ExecutionSpace arg_space)
+      : execution_space_instance_storage_base_t(std::move(arg_space)) {}
+
+  // </editor-fold> end Constructors, destructor, and assignment }}}2
+  //----------------------------------------------------------------------------
+
+  ExecutionSpace const& get_execution_space() const {
+    return this
+        ->execution_space_instance_storage_base_t::execution_space_instance();
+  }
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="required customizations"> {{{2
+
+  template <class NodeImpl>
+  //  requires NodeImplPtr is a shared_ptr to specialization of GraphNodeImpl
+  void add_node(std::shared_ptr<NodeImpl> const& arg_node_ptr) {
+    static_assert(
+        NodeImpl::kernel_type::Policy::is_graph_kernel::value,
+        "Something has gone horribly wrong, but it's too complicated to "
+        "explain here.  Buy Daisy a coffee and she'll explain it to you.");
+    // Since this is always called before any calls to add_predecessor involving
+    // it, we can treat this node as a sink until we discover otherwise.
+    arg_node_ptr->node_details_t::set_kernel(arg_node_ptr->get_kernel());
+    auto spot = m_sinks.find(arg_node_ptr);
+    KOKKOS_ASSERT(spot == m_sinks.end())
+    m_sinks.insert(std::move(spot), std::move(arg_node_ptr));
+  }
+
+  template <class NodeImplPtr, class PredecessorRef>
+  // requires PredecessorRef is a specialization of GraphNodeRef that has
+  // already been added to this graph and NodeImpl is a specialization of
+  // GraphNodeImpl that has already been added to this graph.
+  void add_predecessor(NodeImplPtr arg_node_ptr, PredecessorRef arg_pred_ref) {
+    auto node_ptr_spot = m_sinks.find(arg_node_ptr);
+    auto pred_ptr      = GraphAccess::get_node_ptr(arg_pred_ref);
+    auto pred_ref_spot = m_sinks.find(pred_ptr);
+    KOKKOS_ASSERT(node_ptr_spot != m_sinks.end())
+    if (pred_ref_spot != m_sinks.end()) {
+      // delegate responsibility for executing the predecessor to arg_node
+      // and then remove the predecessor from the set of sinks
+      (*node_ptr_spot)->set_predecessor(std::move(*pred_ref_spot));
+      m_sinks.erase(pred_ref_spot);
+    } else {
+      // We still want to check that it's executed, even though someone else
+      // should have executed it before us
+      (*node_ptr_spot)->set_predecessor(std::move(pred_ptr));
+    }
+  }
+
+  template <class... PredecessorRefs>
+  // See requirements/expectations in GraphBuilder
+  auto create_aggregate_ptr(PredecessorRefs&&...) {
+    // The attachment to predecessors, which is all we really need, happens
+    // in the generic layer, which calls through to add_predecessor for
+    // each predecessor ref, so all we need to do here is create the (trivial)
+    // aggregate node.
+    using aggregate_kernel_impl_t =
+        GraphNodeAggregateKernelDefaultImpl<ExecutionSpace>;
+    using aggregate_node_impl_t =
+        GraphNodeImpl<ExecutionSpace, aggregate_kernel_impl_t,
+                      Kokkos::Experimental::TypeErasedTag>;
+    return GraphAccess::make_node_shared_ptr<aggregate_node_impl_t>(
+        this->execution_space_instance(), _graph_node_kernel_ctor_tag{},
+        aggregate_kernel_impl_t{});
+  }
+
+  auto create_root_node_ptr() {
+    auto rv = Kokkos::Impl::GraphAccess::make_node_shared_ptr<root_node_impl_t>(
+        get_execution_space(), _graph_node_is_root_ctor_tag{});
+    m_sinks.insert(rv);
+    return rv;
+  }
+
+  void submit() {
+    // This reset is gross, but for the purposes of our simple host
+    // implementation...
+    for (auto& sink : m_sinks) {
+      sink->reset_has_executed();
+    }
+    for (auto& sink : m_sinks) {
+      sink->execute_node();
+    }
+  }
+
+  // </editor-fold> end required customizations }}}2
+  //----------------------------------------------------------------------------
+};
+
+// </editor-fold> end GraphImpl default implementation }}}1
+//==============================================================================
+
+}  // end namespace Impl
+
+}  // end namespace Kokkos
+
+#include <OpenMP/Kokkos_OpenMP_Parallel.hpp>
+
+#include <impl/Kokkos_Default_GraphNodeKernel.hpp>
+#include <impl/Kokkos_Default_GraphNode_Impl.hpp>
+
+#endif  // KOKKOS_HOST_GRAPH_IMPL_HPP
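create_aggregate_ptr() above backs when_all() in the public interface: the
aggregate runs no kernel of its own and exists only to fan in several
predecessors, which add_predecessor() then folds into the sink set. A sketch of
the resulting user-facing pattern (spellings of when_all and then_parallel_for
assumed from the Kokkos::Experimental API of this release):

auto graph = Kokkos::Experimental::create_graph([&](auto root) {
  auto a = root.then_parallel_for(16, KOKKOS_LAMBDA(int) { /* ... */ });
  auto b = root.then_parallel_for(16, KOKKOS_LAMBDA(int) { /* ... */ });
  // when_all() creates an aggregate node whose predecessors are a and b.
  auto c = Kokkos::Experimental::when_all(a, b).then_parallel_for(
      16, KOKKOS_LAMBDA(int) { /* ... */ });
  (void)c;
});
graph.submit();  // a and b may run in either order; c runs after both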
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Default_Graph_fwd.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Default_Graph_fwd.hpp
new file mode 100644 (file)
index 0000000..cdbed72
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_HOST_GRAPH_FWD_HPP
+#define KOKKOS_KOKKOS_HOST_GRAPH_FWD_HPP
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class ExecutionSpace>
+struct GraphNodeKernelDefaultImpl;
+
+template <class ExecutionSpace>
+struct GraphNodeAggregateKernelDefaultImpl;
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_HOST_GRAPH_FWD_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_DeviceManagement.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_DeviceManagement.hpp
new file mode 100644 (file)
index 0000000..34421f0
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_DEVICE_MANAGEMENT_HPP
+#define KOKKOS_DEVICE_MANAGEMENT_HPP
+
+#include <vector>
+
+namespace Kokkos {
+class InitializationSettings;
+namespace Impl {
+int get_gpu(const Kokkos::InitializationSettings& settings);
+// This declaration is provided for testing purposes only
+int get_ctest_gpu(const char* local_rank_str);
+// ditto
+std::vector<int> get_visible_devices(
+    Kokkos::InitializationSettings const& settings, int device_count);
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_EBO.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_EBO.hpp
new file mode 100644 (file)
index 0000000..87d6c04
--- /dev/null
@@ -0,0 +1,327 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_EBO_HPP
+#define KOKKOS_EBO_HPP
+
+//----------------------------------------------------------------------------
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Core_fwd.hpp>
+//----------------------------------------------------------------------------
+
+#include <utility>
+#include <type_traits>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <int I>
+struct NotOnDeviceCtorDisambiguator {};
+
+template <class... Args>
+struct NoCtorsNotOnDevice : std::false_type {};
+
+template <class... Args>
+struct DefaultCtorNotOnDevice : std::false_type {};
+
+template <>
+struct DefaultCtorNotOnDevice<> : std::true_type {};
+
+template <class T, bool Empty,
+          template <class...> class CtorNotOnDevice = NoCtorsNotOnDevice>
+struct EBOBaseImpl;
+
+template <class T, template <class...> class CtorNotOnDevice>
+struct EBOBaseImpl<T, true, CtorNotOnDevice> {
+  template <class... Args, class _ignored = void,
+            std::enable_if_t<std::is_void<_ignored>::value &&
+                                 std::is_constructible<T, Args...>::value &&
+                                 !CtorNotOnDevice<Args...>::value,
+                             int> = 0>
+  KOKKOS_FORCEINLINE_FUNCTION constexpr explicit EBOBaseImpl(
+      Args&&...) noexcept {}
+
+  template <class... Args, class _ignored = void,
+            std::enable_if_t<std::is_void<_ignored>::value &&
+                                 std::is_constructible<T, Args...>::value &&
+                                 CtorNotOnDevice<Args...>::value,
+                             long> = 0>
+  inline constexpr explicit EBOBaseImpl(Args&&...) noexcept {}
+
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr EBOBaseImpl(EBOBaseImpl const&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr EBOBaseImpl(EBOBaseImpl&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr EBOBaseImpl& operator=(EBOBaseImpl const&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr EBOBaseImpl& operator=(EBOBaseImpl&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  ~EBOBaseImpl() = default;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr T& _ebo_data_member() & { return *reinterpret_cast<T*>(this); }
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr T const& _ebo_data_member() const& {
+    return *reinterpret_cast<T const*>(this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  T volatile& _ebo_data_member() volatile& {
+    return *reinterpret_cast<T volatile*>(this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  T const volatile& _ebo_data_member() const volatile& {
+    return *reinterpret_cast<T const volatile*>(this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr T&& _ebo_data_member() && {
+    return std::move(*reinterpret_cast<T*>(this));
+  }
+};
+
+template <class T, template <class...> class CTorsNotOnDevice>
+struct EBOBaseImpl<T, false, CTorsNotOnDevice> {
+  T m_ebo_object;
+
+  template <class... Args, class _ignored = void,
+            std::enable_if_t<std::is_void<_ignored>::value &&
+                                 !CTorsNotOnDevice<Args...>::value &&
+                                 std::is_constructible<T, Args...>::value,
+                             int> = 0>
+  KOKKOS_FORCEINLINE_FUNCTION constexpr explicit EBOBaseImpl(
+      Args&&... args) noexcept(noexcept(T(std::forward<Args>(args)...)))
+      : m_ebo_object(std::forward<Args>(args)...) {}
+
+  template <class... Args, class _ignored = void,
+            std::enable_if_t<std::is_void<_ignored>::value &&
+                                 CTorsNotOnDevice<Args...>::value &&
+                                 std::is_constructible<T, Args...>::value,
+                             long> = 0>
+  inline constexpr explicit EBOBaseImpl(Args&&... args) noexcept(
+      noexcept(T(std::forward<Args>(args)...)))
+      : m_ebo_object(std::forward<Args>(args)...) {}
+
+  // TODO @tasking @minor DSH noexcept in the right places?
+
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr EBOBaseImpl(EBOBaseImpl const&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr EBOBaseImpl(EBOBaseImpl&&) noexcept = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr EBOBaseImpl& operator=(EBOBaseImpl const&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  constexpr EBOBaseImpl& operator=(EBOBaseImpl&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  ~EBOBaseImpl() = default;
+
+  KOKKOS_INLINE_FUNCTION
+  T& _ebo_data_member() & { return m_ebo_object; }
+
+  KOKKOS_INLINE_FUNCTION
+  T const& _ebo_data_member() const& { return m_ebo_object; }
+
+  KOKKOS_INLINE_FUNCTION
+  T volatile& _ebo_data_member() volatile& { return m_ebo_object; }
+
+  KOKKOS_INLINE_FUNCTION
+  T const volatile& _ebo_data_member() const volatile& { return m_ebo_object; }
+
+  KOKKOS_INLINE_FUNCTION
+  T&& _ebo_data_member() && { return m_ebo_object; }
+};
+
+/**
+ *
+ * @tparam T
+ */
+template <class T,
+          template <class...> class CtorsNotOnDevice = NoCtorsNotOnDevice>
+struct StandardLayoutNoUniqueAddressMemberEmulation
+    : EBOBaseImpl<T, std::is_empty<T>::value, CtorsNotOnDevice> {
+ private:
+  using ebo_base_t = EBOBaseImpl<T, std::is_empty<T>::value, CtorsNotOnDevice>;
+
+ public:
+  using ebo_base_t::ebo_base_t;
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  constexpr T& no_unique_address_data_member() & {
+    return this->ebo_base_t::_ebo_data_member();
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  constexpr T const& no_unique_address_data_member() const& {
+    return this->ebo_base_t::_ebo_data_member();
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  T volatile& no_unique_address_data_member() volatile& {
+    return this->ebo_base_t::_ebo_data_member();
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  T const volatile& no_unique_address_data_member() const volatile& {
+    return this->ebo_base_t::_ebo_data_member();
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  constexpr T&& no_unique_address_data_member() && {
+    return this->ebo_base_t::_ebo_data_member();
+  }
+};
+
+/**
+ *
+ * @tparam T
+ */
+template <class T,
+          template <class...> class CtorsNotOnDevice = NoCtorsNotOnDevice>
+class NoUniqueAddressMemberEmulation
+    : private StandardLayoutNoUniqueAddressMemberEmulation<T,
+                                                           CtorsNotOnDevice> {
+ private:
+  using base_t =
+      StandardLayoutNoUniqueAddressMemberEmulation<T, CtorsNotOnDevice>;
+
+ public:
+  using base_t::base_t;
+  using base_t::no_unique_address_data_member;
+};
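+
+/* Usage sketch (editorial illustration; `PtrAndComparator` is a hypothetical
+ * example type, not part of Kokkos):
+ *
+ *   template <class T, class Compare>
+ *   struct PtrAndComparator
+ *       : private NoUniqueAddressMemberEmulation<Compare> {
+ *     T* ptr;
+ *     bool less(T const& a, T const& b) const {
+ *       return this->no_unique_address_data_member()(a, b);
+ *     }
+ *   };
+ *
+ * When `Compare` is empty (e.g. std::less<T>), the EBO base contributes no
+ * storage, so sizeof(PtrAndComparator<T, Compare>) == sizeof(T*).
+ */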
+
+template <class ExecutionSpace>
+class ExecutionSpaceInstanceStorage
+    : private NoUniqueAddressMemberEmulation<ExecutionSpace,
+                                             DefaultCtorNotOnDevice> {
+ private:
+  using base_t =
+      NoUniqueAddressMemberEmulation<ExecutionSpace, DefaultCtorNotOnDevice>;
+
+ protected:
+  constexpr explicit ExecutionSpaceInstanceStorage() : base_t() {}
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr explicit ExecutionSpaceInstanceStorage(
+      ExecutionSpace const& arg_execution_space)
+      : base_t(arg_execution_space) {}
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr explicit ExecutionSpaceInstanceStorage(
+      ExecutionSpace&& arg_execution_space)
+      : base_t(std::move(arg_execution_space)) {}
+
+  KOKKOS_INLINE_FUNCTION
+  ExecutionSpace& execution_space_instance() & {
+    return this->no_unique_address_data_member();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  ExecutionSpace const& execution_space_instance() const& {
+    return this->no_unique_address_data_member();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  ExecutionSpace&& execution_space_instance() && {
+    return std::move(*this).no_unique_address_data_member();
+  }
+};
+
+template <class MemorySpace>
+class MemorySpaceInstanceStorage
+    : private NoUniqueAddressMemberEmulation<MemorySpace,
+                                             DefaultCtorNotOnDevice> {
+ private:
+  using base_t =
+      NoUniqueAddressMemberEmulation<MemorySpace, DefaultCtorNotOnDevice>;
+
+ protected:
+  MemorySpaceInstanceStorage() : base_t() {}
+
+  KOKKOS_INLINE_FUNCTION
+  MemorySpaceInstanceStorage(MemorySpace const& arg_memory_space)
+      : base_t(arg_memory_space) {}
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr explicit MemorySpaceInstanceStorage(MemorySpace&& arg_memory_space)
+      : base_t(std::move(arg_memory_space)) {}
+
+  KOKKOS_INLINE_FUNCTION
+  MemorySpace& memory_space_instance() & {
+    return this->no_unique_address_data_member();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  MemorySpace const& memory_space_instance() const& {
+    return this->no_unique_address_data_member();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  MemorySpace&& memory_space_instance() && {
+    return std::move(*this).no_unique_address_data_member();
+  }
+};
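+
+/* Usage sketch (editorial illustration; `SpaceHolder` is hypothetical): a
+ * class that needs to carry an execution space instance, but should remain
+ * empty when the space type is stateless, can derive from the storage base:
+ *
+ *   template <class ExecSpace>
+ *   class SpaceHolder : private ExecutionSpaceInstanceStorage<ExecSpace> {
+ *     using base_t = ExecutionSpaceInstanceStorage<ExecSpace>;
+ *
+ *    public:
+ *     explicit SpaceHolder(ExecSpace const& s) : base_t(s) {}
+ *     ExecSpace const& space() const {
+ *       return this->execution_space_instance();
+ *     }
+ *   };
+ *
+ * FixedBlockSizeMemoryPool (Kokkos_FixedBufferMemoryPool.hpp) uses
+ * MemorySpaceInstanceStorage in the same way.
+ */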
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_EBO_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Error.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Error.cpp
new file mode 100644 (file)
index 0000000..7502283
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <cstring>
+#include <cstdlib>
+
+#include <iostream>
+#include <sstream>
+#include <iomanip>
+#include <stdexcept>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_Stacktrace.hpp>
+#include <Cuda/Kokkos_Cuda_Error.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+void traceback_callstack(std::ostream &msg) {
+#ifdef KOKKOS_IMPL_ENABLE_STACKTRACE
+  msg << "\nBacktrace:\n";
+  save_stacktrace();
+  print_demangled_saved_stacktrace(msg);
+#else
+  msg << "\nTraceback functionality not available\n";
+#endif
+}
+
+void throw_runtime_exception(const std::string &msg) {
+  throw std::runtime_error(msg);
+}
+
+void host_abort(const char *const message) {
+  std::cerr << message;
+  traceback_callstack(std::cerr);
+  ::abort();
+}
+
+std::string human_memory_size(size_t arg_bytes) {
+  double bytes   = arg_bytes;
+  const double K = 1024;
+  const double M = K * 1024;
+  const double G = M * 1024;
+
+  std::ostringstream out;
+  if (bytes < K) {
+    out << std::setprecision(4) << bytes << " B";
+  } else if (bytes < M) {
+    bytes /= K;
+    out << std::setprecision(4) << bytes << " K";
+  } else if (bytes < G) {
+    bytes /= M;
+    out << std::setprecision(4) << bytes << " M";
+  } else {
+    bytes /= G;
+    out << std::setprecision(4) << bytes << " G";
+  }
+  return out.str();
+}
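+
+// For example (editorial illustration): human_memory_size(512) yields
+// "512 B", human_memory_size(1536) yields "1.5 K", and
+// human_memory_size(size_t(3) << 30) yields "3 G".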
+
+}  // namespace Impl
+
+void Experimental::RawMemoryAllocationFailure::print_error_message(
+    std::ostream &o) const {
+  o << "Allocation of size " << Impl::human_memory_size(m_attempted_size);
+  o << " failed";
+  switch (m_failure_mode) {
+    case FailureMode::OutOfMemoryError:
+      o << ", likely due to insufficient memory.";
+      break;
+    case FailureMode::AllocationNotAligned:
+      o << " because the allocation was improperly aligned.";
+      break;
+    case FailureMode::InvalidAllocationSize:
+      o << " because the requested allocation size is not a valid size for the"
+           " requested allocation mechanism (it's probably too large).";
+      break;
+    // TODO move this to the subclass for Cuda-related things
+    case FailureMode::MaximumCudaUVMAllocationsExceeded:
+      o << " because the maximum Cuda UVM allocations was exceeded.";
+      break;
+    case FailureMode::Unknown: o << " because of an unknown error."; break;
+  }
+  o << "  (The allocation mechanism was ";
+  switch (m_mechanism) {
+    case AllocationMechanism::StdMalloc: o << "standard malloc()."; break;
+    case AllocationMechanism::PosixMemAlign: o << "posix_memalign()."; break;
+    case AllocationMechanism::PosixMMap: o << "POSIX mmap()."; break;
+    case AllocationMechanism::IntelMMAlloc:
+      o << "the Intel _mm_malloc() intrinsic.";
+      break;
+    case AllocationMechanism::CudaMalloc: o << "cudaMalloc()."; break;
+    case AllocationMechanism::CudaMallocManaged:
+      o << "cudaMallocManaged().";
+      break;
+    case AllocationMechanism::CudaHostAlloc: o << "cudaHostAlloc()."; break;
+    case AllocationMechanism::HIPMalloc: o << "hipMalloc()."; break;
+    case AllocationMechanism::HIPHostMalloc: o << "hipHostMalloc()."; break;
+    case AllocationMechanism::HIPMallocManaged:
+      o << "hipMallocManaged().";
+      break;
+    case AllocationMechanism::SYCLMallocDevice:
+      o << "sycl::malloc_device().";
+      break;
+    case AllocationMechanism::SYCLMallocShared:
+      o << "sycl::malloc_shared().";
+      break;
+    case AllocationMechanism::SYCLMallocHost:
+      o << "sycl::malloc_host().";
+      break;
+  }
+  append_additional_error_information(o);
+  o << ")" << std::endl;
+}
+
+std::string Experimental::RawMemoryAllocationFailure::get_error_message()
+    const {
+  std::ostringstream out;
+  print_error_message(out);
+  return out.str();
+}
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_CUDA
+namespace Experimental {
+
+void CudaRawMemoryAllocationFailure::append_additional_error_information(
+    std::ostream &o) const {
+  if (m_error_code != cudaSuccess) {
+    o << "  The Cuda allocation returned the error code \"\""
+      << cudaGetErrorName(m_error_code) << "\".";
+  }
+}
+
+}  // end namespace Experimental
+#endif
+
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Error.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Error.hpp
new file mode 100644 (file)
index 0000000..63b40f2
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_ERROR_HPP
+#define KOKKOS_IMPL_ERROR_HPP
+
+#include <string>
+#include <iosfwd>
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_CUDA
+#include <Cuda/Kokkos_Cuda_abort.hpp>
+#endif
+#ifdef KOKKOS_ENABLE_HIP
+#include <HIP/Kokkos_HIP_Abort.hpp>
+#endif
+#ifdef KOKKOS_ENABLE_SYCL
+#include <SYCL/Kokkos_SYCL_Abort.hpp>
+#endif
+
+#ifndef KOKKOS_ABORT_MESSAGE_BUFFER_SIZE
+#define KOKKOS_ABORT_MESSAGE_BUFFER_SIZE 2048
+#endif  // ifndef KOKKOS_ABORT_MESSAGE_BUFFER_SIZE
+
+namespace Kokkos {
+namespace Impl {
+
+[[noreturn]] void host_abort(const char *const);
+
+#if defined(KOKKOS_ENABLE_CUDA) && defined(__CUDA_ARCH__)
+
+#if defined(__APPLE__) || defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+// cuda_abort does not abort when building for macOS;
+// the KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK exemption is required to work around
+// failures in random number generator unit tests on pre-Volta architectures.
+#define KOKKOS_IMPL_ABORT_NORETURN
+#else
+// cuda_abort aborts when building for platforms other than macOS
+#define KOKKOS_IMPL_ABORT_NORETURN [[noreturn]]
+#endif
+
+#elif defined(KOKKOS_COMPILER_NVHPC)
+
+#define KOKKOS_IMPL_ABORT_NORETURN
+
+#elif defined(KOKKOS_ENABLE_HIP) && defined(__HIP_DEVICE_COMPILE__)
+// HIP aborts
+#define KOKKOS_IMPL_ABORT_NORETURN [[noreturn]]
+#elif defined(KOKKOS_ENABLE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
+// FIXME_SYCL SYCL doesn't abort
+#define KOKKOS_IMPL_ABORT_NORETURN
+#elif !defined(KOKKOS_ENABLE_OPENMPTARGET)
+// Host aborts
+#define KOKKOS_IMPL_ABORT_NORETURN [[noreturn]]
+#else
+// Everything else does not abort
+#define KOKKOS_IMPL_ABORT_NORETURN
+#endif
+
+#ifdef KOKKOS_ENABLE_SYCL  // FIXME_SYCL
+#define KOKKOS_IMPL_ABORT_NORETURN_DEVICE
+#else
+#define KOKKOS_IMPL_ABORT_NORETURN_DEVICE KOKKOS_IMPL_ABORT_NORETURN
+#endif
+
+#if defined(KOKKOS_ENABLE_CUDA) || defined(KOKKOS_ENABLE_HIP) || \
+    defined(KOKKOS_ENABLE_SYCL) || defined(KOKKOS_ENABLE_OPENMPTARGET)
+KOKKOS_IMPL_ABORT_NORETURN_DEVICE inline KOKKOS_IMPL_DEVICE_FUNCTION void
+device_abort(const char *const msg) {
+#if defined(KOKKOS_ENABLE_CUDA)
+  ::Kokkos::Impl::cuda_abort(msg);
+#elif defined(KOKKOS_ENABLE_HIP)
+  ::Kokkos::Impl::hip_abort(msg);
+#elif defined(KOKKOS_ENABLE_SYCL)
+  ::Kokkos::Impl::sycl_abort(msg);
+#elif defined(KOKKOS_ENABLE_OPENMPTARGET)
+  printf("%s", msg);  // FIXME_OPENMPTARGET
+#else
+#error faulty logic
+#endif
+}
+#endif
+
+[[noreturn]] void throw_runtime_exception(const std::string &msg);
+
+void traceback_callstack(std::ostream &);
+
+std::string human_memory_size(size_t arg_bytes);
+
+}  // namespace Impl
+
+namespace Experimental {
+
+class RawMemoryAllocationFailure : public std::bad_alloc {
+ public:
+  enum class FailureMode {
+    OutOfMemoryError,
+    AllocationNotAligned,
+    InvalidAllocationSize,
+    MaximumCudaUVMAllocationsExceeded,
+    Unknown
+  };
+  enum class AllocationMechanism {
+    StdMalloc,
+    PosixMemAlign,
+    PosixMMap,
+    IntelMMAlloc,
+    CudaMalloc,
+    CudaMallocManaged,
+    CudaHostAlloc,
+    HIPMalloc,
+    HIPHostMalloc,
+    HIPMallocManaged,
+    SYCLMallocDevice,
+    SYCLMallocShared,
+    SYCLMallocHost
+  };
+
+ private:
+  size_t m_attempted_size;
+  size_t m_attempted_alignment;
+  FailureMode m_failure_mode;
+  AllocationMechanism m_mechanism;
+
+ public:
+  RawMemoryAllocationFailure(
+      size_t arg_attempted_size, size_t arg_attempted_alignment,
+      FailureMode arg_failure_mode = FailureMode::OutOfMemoryError,
+      AllocationMechanism arg_mechanism =
+          AllocationMechanism::StdMalloc) noexcept
+      : m_attempted_size(arg_attempted_size),
+        m_attempted_alignment(arg_attempted_alignment),
+        m_failure_mode(arg_failure_mode),
+        m_mechanism(arg_mechanism) {}
+
+  RawMemoryAllocationFailure() noexcept = delete;
+
+  RawMemoryAllocationFailure(RawMemoryAllocationFailure const &) noexcept =
+      default;
+  RawMemoryAllocationFailure(RawMemoryAllocationFailure &&) noexcept = default;
+
+  RawMemoryAllocationFailure &operator=(
+      RawMemoryAllocationFailure const &) noexcept = default;
+  RawMemoryAllocationFailure &operator=(
+      RawMemoryAllocationFailure &&) noexcept = default;
+
+  ~RawMemoryAllocationFailure() noexcept override = default;
+
+  KOKKOS_ATTRIBUTE_NODISCARD
+  const char *what() const noexcept override {
+    if (m_failure_mode == FailureMode::OutOfMemoryError) {
+      return "Memory allocation error: out of memory";
+    } else if (m_failure_mode == FailureMode::AllocationNotAligned) {
+      return "Memory allocation error: allocation result was under-aligned";
+    }
+
+    return "Memory allocation error";  // fallback for the remaining failure
+                                       // modes; what() must not return null
+  }
+
+  KOKKOS_ATTRIBUTE_NODISCARD
+  size_t attempted_size() const noexcept { return m_attempted_size; }
+
+  KOKKOS_ATTRIBUTE_NODISCARD
+  size_t attempted_alignment() const noexcept { return m_attempted_alignment; }
+
+  KOKKOS_ATTRIBUTE_NODISCARD
+  AllocationMechanism allocation_mechanism() const noexcept {
+    return m_mechanism;
+  }
+
+  KOKKOS_ATTRIBUTE_NODISCARD
+  FailureMode failure_mode() const noexcept { return m_failure_mode; }
+
+  void print_error_message(std::ostream &o) const;
+  KOKKOS_ATTRIBUTE_NODISCARD
+  std::string get_error_message() const;
+
+  virtual void append_additional_error_information(std::ostream &) const {}
+};
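+
+/* Usage sketch (editorial illustration; `raw_allocate` stands in for any
+ * allocation routine that throws this exception type):
+ *
+ *   try {
+ *     void* p = raw_allocate(num_bytes);
+ *   } catch (Kokkos::Experimental::RawMemoryAllocationFailure const& e) {
+ *     std::cerr << e.get_error_message()
+ *               << "requested size: " << e.attempted_size() << " bytes\n";
+ *   }
+ */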
+
+}  // end namespace Experimental
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+KOKKOS_IMPL_ABORT_NORETURN KOKKOS_INLINE_FUNCTION void abort(
+    const char *const message) {
+  KOKKOS_IF_ON_HOST(::Kokkos::Impl::host_abort(message);)
+  KOKKOS_IF_ON_DEVICE(::Kokkos::Impl::device_abort(message);)
+}
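+
+/* Usage note (editorial): Kokkos::abort("message") can be called uniformly
+ * from host and device code; on the host it prints the message and, if
+ * stacktrace support is enabled, a backtrace, before calling ::abort(). */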
+
+#undef KOKKOS_IMPL_ABORT_NORETURN
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#if !defined(NDEBUG) || defined(KOKKOS_ENFORCE_CONTRACTS) || \
+    defined(KOKKOS_ENABLE_DEBUG)
+#define KOKKOS_EXPECTS(...)                                                    \
+  {                                                                            \
+    if (!bool(__VA_ARGS__)) {                                                  \
+      ::Kokkos::abort(                                                         \
+          "Kokkos contract violation:\n  "                                     \
+          "  Expected precondition `" #__VA_ARGS__                             \
+          "` evaluated false.\n"                                               \
+          "Error at " KOKKOS_IMPL_TOSTRING(__FILE__) ":" KOKKOS_IMPL_TOSTRING( \
+              __LINE__) " \n");                                                \
+    }                                                                          \
+  }
+#define KOKKOS_ENSURES(...)                                                    \
+  {                                                                            \
+    if (!bool(__VA_ARGS__)) {                                                  \
+      ::Kokkos::abort(                                                         \
+          "Kokkos contract violation:\n  "                                     \
+          "  Ensured postcondition `" #__VA_ARGS__                             \
+          "` evaluated false.\n"                                               \
+          "Error at " KOKKOS_IMPL_TOSTRING(__FILE__) ":" KOKKOS_IMPL_TOSTRING( \
+              __LINE__) " \n");                                                \
+    }                                                                          \
+  }
+// some projects already define this for themselves, so don't mess
+// them up
+#ifndef KOKKOS_ASSERT
+#define KOKKOS_ASSERT(...)                                                     \
+  {                                                                            \
+    if (!bool(__VA_ARGS__)) {                                                  \
+      ::Kokkos::abort(                                                         \
+          "Kokkos contract violation:\n  "                                     \
+          "  Asserted condition `" #__VA_ARGS__                                \
+          "` evaluated false.\n"                                               \
+          "Error at " KOKKOS_IMPL_TOSTRING(__FILE__) ":" KOKKOS_IMPL_TOSTRING( \
+              __LINE__) " \n");                                                \
+    }                                                                          \
+  }
+#endif  // ifndef KOKKOS_ASSERT
+#else   // not debug mode
+#define KOKKOS_EXPECTS(...)
+#define KOKKOS_ENSURES(...)
+#ifndef KOKKOS_ASSERT
+#define KOKKOS_ASSERT(...)
+#endif  // ifndef KOKKOS_ASSERT
+#endif  // end debug mode ifdefs
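+
+/* Usage sketch (editorial illustration): contract checks that are active in
+ * debug builds and compile away otherwise:
+ *
+ *   KOKKOS_INLINE_FUNCTION
+ *   double checked_sqrt(double x) {
+ *     KOKKOS_EXPECTS(x >= 0.0);  // precondition
+ *     double r = Kokkos::sqrt(x);
+ *     KOKKOS_ENSURES(r >= 0.0);  // postcondition
+ *     return r;
+ *   }
+ */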
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_IMPL_ERROR_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ExecPolicy.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ExecPolicy.cpp
new file mode 100644 (file)
index 0000000..75b89c7
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+#include <sstream>
+
+namespace Kokkos {
+namespace Impl {
+PerTeamValue::PerTeamValue(size_t arg) : value(arg) {}
+
+PerThreadValue::PerThreadValue(size_t arg) : value(arg) {}
+}  // namespace Impl
+
+Impl::PerTeamValue PerTeam(const size_t& arg) {
+  return Impl::PerTeamValue(arg);
+}
+
+Impl::PerThreadValue PerThread(const size_t& arg) {
+  return Impl::PerThreadValue(arg);
+}
+
+void team_policy_check_valid_storage_level_argument(int level) {
+  if (!(level == 0 || level == 1)) {
+    std::stringstream ss;
+    ss << "TeamPolicy::set_scratch_size(/*level*/ " << level
+       << ", ...) storage level argument must be 0 or 1 to be valid\n";
+    Impl::throw_runtime_exception(ss.str());
+  }
+}
+
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ExecSpaceManager.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ExecSpaceManager.hpp
new file mode 100644 (file)
index 0000000..354bdde
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_EXEC_SPACE_MANAGER_HPP
+#define KOKKOS_EXEC_SPACE_MANAGER_HPP
+
+#include <impl/Kokkos_InitializationSettings.hpp>
+#include <Kokkos_DetectionIdiom.hpp>
+#include <Kokkos_Concepts.hpp>
+
+#include <iosfwd>
+#include <map>
+#include <memory>  // for std::unique_ptr / std::make_unique used below
+#include <string>
+
+namespace {
+
+template <class T>
+using public_member_types_t = std::enable_if_t<
+    Kokkos::is_execution_space<typename T::execution_space>::value &&
+    Kokkos::is_memory_space<typename T::memory_space>::value &&
+    Kokkos::is_device<typename T::device_type>::value &&
+    Kokkos::is_array_layout<typename T::array_layout>::value &&
+    std::is_integral<typename T::size_type>::value &&
+    Kokkos::is_memory_space<typename T::scratch_memory_space>::value>;
+
+template <class T>
+using print_configuration_t = std::enable_if_t<
+    std::is_void<decltype(std::declval<T const&>().print_configuration(
+        std::declval<std::ostream&>()))>::value &&
+    std::is_void<decltype(std::declval<T const&>().print_configuration(
+        std::declval<std::ostream&>(), false))>::value>;
+
+template <class T>
+using initialize_finalize_t = std::enable_if_t<
+    std::is_void<decltype(T::impl_initialize(
+        std::declval<Kokkos::InitializationSettings const&>()))>::value &&
+    std::is_void<decltype(T::impl_finalize())>::value>;
+
+template <class T>
+using fence_t = std::enable_if_t<
+    std::is_void<decltype(std::declval<T const&>().fence())>::value &&
+    std::is_void<decltype(std::declval<T const&>().fence("name"))>::value &&
+    std::is_void<decltype(T::impl_static_fence("name"))>::value>;
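+
+// Each alias above is a detector for Kokkos::is_detected (the C++ detection
+// idiom): is_detected<fence_t, T>::value is true exactly when T provides the
+// fence() overloads and impl_static_fence() that the alias requires.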
+
+#define STATIC_ASSERT(...) static_assert(__VA_ARGS__, "")  // FIXME C++17
+
+template <class ExecutionSpace>
+constexpr bool check_valid_execution_space() {
+  using Kokkos::is_detected;
+  STATIC_ASSERT(std::is_default_constructible<ExecutionSpace>::value);
+  STATIC_ASSERT(is_detected<public_member_types_t, ExecutionSpace>::value);
+  STATIC_ASSERT(is_detected<print_configuration_t, ExecutionSpace>::value);
+  STATIC_ASSERT(is_detected<initialize_finalize_t, ExecutionSpace>::value);
+  STATIC_ASSERT(is_detected<fence_t, ExecutionSpace>::value);
+#ifndef KOKKOS_ENABLE_HPX  // FIXME_HPX
+  STATIC_ASSERT(sizeof(ExecutionSpace) <= 2 * sizeof(void*));
+#endif
+  return true;
+}
+
+#undef STATIC_ASSERT
+
+}  // namespace
+
+namespace Kokkos {
+namespace Impl {
+
+struct ExecSpaceBase {
+  virtual void initialize(InitializationSettings const&)           = 0;
+  virtual void finalize()                                          = 0;
+  virtual void static_fence(std::string const&)                    = 0;
+  virtual void print_configuration(std::ostream& os, bool verbose) = 0;
+  virtual ~ExecSpaceBase()                                         = default;
+};
+
+template <class ExecutionSpace>
+struct ExecSpaceDerived : ExecSpaceBase {
+  static_assert(check_valid_execution_space<ExecutionSpace>(), "");
+  void initialize(InitializationSettings const& settings) final {
+    ExecutionSpace::impl_initialize(settings);
+  }
+  void finalize() final { ExecutionSpace::impl_finalize(); }
+  void static_fence(std::string const& label) final {
+    ExecutionSpace::impl_static_fence(label);
+  }
+  void print_configuration(std::ostream& os, bool verbose) final {
+    ExecutionSpace().print_configuration(os, verbose);
+  }
+};
+
+/* ExecSpaceManager - Responsible for initializing all the registered
+ * backends. Backends are registered using the register_space_factory()
+ * function, which should be called from a global context so that it runs
+ * prior to initialize_spaces(), which is called from Kokkos::initialize().
+ * (See the usage sketch after initialize_space_factory() below.)
+ */
+class ExecSpaceManager {
+  std::map<std::string, std::unique_ptr<ExecSpaceBase>> exec_space_factory_list;
+  ExecSpaceManager() = default;
+
+ public:
+  void register_space_factory(std::string name,
+                              std::unique_ptr<ExecSpaceBase> ptr);
+  void initialize_spaces(const Kokkos::InitializationSettings& settings);
+  void finalize_spaces();
+  void static_fence(const std::string&);
+  void print_configuration(std::ostream& os, bool verbose);
+  static ExecSpaceManager& get_instance();
+};
+
+template <class ExecutionSpace>
+int initialize_space_factory(std::string name) {
+  auto space_ptr = std::make_unique<ExecSpaceDerived<ExecutionSpace>>();
+  ExecSpaceManager::get_instance().register_space_factory(name,
+                                                          std::move(space_ptr));
+  return 1;
+}
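+
+/* Usage sketch (editorial illustration; `SomeExecSpace` and the name are
+ * hypothetical): a backend registers itself by initializing a variable at
+ * namespace scope, so registration runs before Kokkos::initialize():
+ *
+ *   namespace {
+ *   int dummy = Kokkos::Impl::initialize_space_factory<SomeExecSpace>(
+ *       "090_SomeExecSpace");
+ *   }
+ *
+ * Since the registry is a std::map keyed by name, a numeric prefix in the
+ * name controls the order in which spaces are initialized.
+ */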
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_FixedBufferMemoryPool.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_FixedBufferMemoryPool.hpp
new file mode 100644 (file)
index 0000000..3068ef3
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//              Copyright (2019) Sandia Corporation
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_KOKKOS_FIXEDBUFFERMEMORYPOOL_HPP
+#define KOKKOS_IMPL_KOKKOS_FIXEDBUFFERMEMORYPOOL_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Atomic.hpp>
+
+#include <Kokkos_PointerOwnership.hpp>
+#include <impl/Kokkos_SimpleTaskScheduler.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class DeviceType, size_t Size, size_t Align = 1,
+          class SizeType = typename DeviceType::execution_space::size_type>
+class FixedBlockSizeMemoryPool
+    : private MemorySpaceInstanceStorage<typename DeviceType::memory_space> {
+ public:
+  using memory_space = typename DeviceType::memory_space;
+  using size_type    = SizeType;
+
+ private:
+  using memory_space_storage_base =
+      MemorySpaceInstanceStorage<typename DeviceType::memory_space>;
+  using tracker_type = Kokkos::Impl::SharedAllocationTracker;
+  using record_type  = Kokkos::Impl::SharedAllocationRecord<memory_space>;
+
+  struct alignas(Align) Block {
+    union {
+      char ignore;
+      char data[Size];
+    };
+  };
+
+  static constexpr auto actual_size = sizeof(Block);
+
+  // TODO shared allocation tracker
+  // TODO @optimization put the index values on different cache lines (CPU) or
+  // pages (GPU)?
+
+  tracker_type m_tracker                         = {};
+  size_type m_num_blocks                         = 0;
+  size_type m_first_free_idx                     = 0;
+  size_type m_last_free_idx                      = 0;
+  Kokkos::OwningRawPtr<Block> m_first_block      = nullptr;
+  Kokkos::OwningRawPtr<size_type> m_free_indices = nullptr;
+
+  enum : size_type { IndexInUse = ~size_type(0) };
+
+ public:
+  FixedBlockSizeMemoryPool(memory_space const& mem_space, size_type num_blocks)
+      : memory_space_storage_base(mem_space),
+        m_tracker(),
+        m_num_blocks(num_blocks),
+        m_first_free_idx(0),
+        m_last_free_idx(num_blocks) {
+    // TODO alignment?
+    auto block_record = record_type::allocate(
+        mem_space, "FixedBlockSizeMemPool_blocks", num_blocks * sizeof(Block));
+    KOKKOS_ASSERT(intptr_t(block_record->data()) % Align == 0);
+    m_tracker.assign_allocated_record_to_uninitialized(block_record);
+    m_first_block = (Block*)block_record->data();
+
+    auto idx_record = record_type::allocate(
+        mem_space, "Kokkos::FixedBlockSizeMemPool_free_indices",
+        num_blocks * sizeof(size_type));
+    KOKKOS_ASSERT(intptr_t(idx_record->data()) % alignof(size_type) == 0);
+    m_tracker.assign_allocated_record_to_uninitialized(idx_record);
+    m_free_indices = (size_type*)idx_record->data();
+
+    for (size_type i = 0; i < num_blocks; ++i) {
+      m_free_indices[i] = i;
+    }
+
+    Kokkos::memory_fence();
+  }
+
+  // For compatibility with MemoryPool<>
+  FixedBlockSizeMemoryPool(memory_space const& mem_space,
+                           size_t mempool_capacity, unsigned, unsigned,
+                           unsigned)
+      : FixedBlockSizeMemoryPool(mem_space, mempool_capacity / actual_size) {
+    /* forwarding ctor, must be empty */
+  }
+
+  KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeMemoryPool() = default;
+  KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeMemoryPool(
+      FixedBlockSizeMemoryPool&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeMemoryPool(
+      FixedBlockSizeMemoryPool const&)                        = default;
+  KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeMemoryPool& operator=(
+      FixedBlockSizeMemoryPool&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeMemoryPool& operator=(
+      FixedBlockSizeMemoryPool const&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  void* allocate(size_type alloc_size) const noexcept {
+    (void)alloc_size;
+    KOKKOS_EXPECTS(alloc_size <= Size);
+    auto free_idx_counter = Kokkos::atomic_fetch_add(
+        (volatile size_type*)&m_first_free_idx, size_type(1));
+    auto free_idx_idx = free_idx_counter % m_num_blocks;
+
+    // We don't have exclusive access to m_free_indices[free_idx_idx] because
+    // the allocate counter might have lapped us since we incremented it
+    auto current_free_idx = m_free_indices[free_idx_idx];
+    size_type free_idx    = IndexInUse;
+    free_idx = Kokkos::atomic_compare_exchange(&m_free_indices[free_idx_idx],
+                                               current_free_idx, free_idx);
+    Kokkos::memory_fence();
+
+    // TODO figure out how to decrement here?
+
+    if (free_idx == IndexInUse) {
+      return nullptr;
+    } else {
+      return (void*)&m_first_block[free_idx];
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void deallocate(void* ptr, size_type /*alloc_size*/) const noexcept {
+    // figure out which block we are
+    auto offset = intptr_t(ptr) - intptr_t(m_first_block);
+
+    KOKKOS_EXPECTS(offset % actual_size == 0 &&
+                   offset / actual_size < m_num_blocks);
+
+    Kokkos::memory_fence();
+    auto last_idx_idx = Kokkos::atomic_fetch_add(
+        (volatile size_type*)&m_last_free_idx, size_type(1));
+    last_idx_idx %= m_num_blocks;
+    m_free_indices[last_idx_idx] = offset / actual_size;
+  }
+};
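+
+/* Usage sketch (editorial illustration): carve fixed-size blocks out of a
+ * memory space and hand them out from within kernels:
+ *
+ *   using pool_t = Kokkos::Impl::FixedBlockSizeMemoryPool<
+ *       Kokkos::DefaultExecutionSpace::device_type, 64, 8>;
+ *   pool_t pool(typename pool_t::memory_space{}, 1024);  // 1024 64-byte blocks
+ *
+ *   // inside a kernel:
+ *   void* p = pool.allocate(48);   // any size <= 64; nullptr if exhausted
+ *   if (p != nullptr) pool.deallocate(p, 48);
+ */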
+
+#if 0
+template <
+  class DeviceType,
+  size_t Size,
+  size_t Align=1,
+  class SizeType = typename DeviceType::execution_space::size_type
+>
+class FixedBlockSizeChaseLevMemoryPool
+  : private MemorySpaceInstanceStorage<typename DeviceType::memory_space>
+{
+public:
+
+  using memory_space = typename DeviceType::memory_space;
+  using size_type = SizeType;
+
+private:
+
+  using memory_space_storage_base = MemorySpaceInstanceStorage<typename DeviceType::memory_space>;
+  using tracker_type = Kokkos::Impl::SharedAllocationTracker;
+  using record_type = Kokkos::Impl::SharedAllocationRecord<memory_space>;
+
+  struct alignas(Align) Block { union { char ignore; char data[Size]; }; };
+
+  static constexpr auto actual_size = sizeof(Block);
+
+  tracker_type m_tracker = { };
+  size_type m_num_blocks = 0;
+  size_type m_first_free_idx = 0;
+  size_type m_last_free_idx = 0;
+  // declared here because the constructor and allocate()/deallocate() below
+  // use them; the original draft of this disabled variant omitted them
+  Kokkos::OwningRawPtr<Block> m_first_block = nullptr;
+  Kokkos::OwningRawPtr<size_type> m_free_indices = nullptr;
+
+  enum : size_type { IndexInUse = ~size_type(0) };
+
+public:
+
+  FixedBlockSizeChaseLevMemoryPool(
+    memory_space const& mem_space,
+    size_type num_blocks
+  ) : memory_space_storage_base(mem_space),
+    m_tracker(),
+    m_num_blocks(num_blocks),
+    m_first_free_idx(0),
+    m_last_free_idx(num_blocks)
+  {
+    // TODO alignment?
+    auto block_record = record_type::allocate(
+      mem_space, "FixedBlockSizeMemPool_blocks", num_blocks * sizeof(Block)
+    );
+    KOKKOS_ASSERT(intptr_t(block_record->data()) % Align == 0);
+    m_tracker.assign_allocated_record_to_uninitialized(block_record);
+    m_first_block = (Block*)block_record->data();
+
+    auto idx_record = record_type::allocate(
+      mem_space, "FixedBlockSizeMemPool_blocks", num_blocks * sizeof(size_type)
+    );
+    KOKKOS_ASSERT(intptr_t(idx_record->data()) % alignof(size_type) == 0);
+    m_tracker.assign_allocated_record_to_uninitialized(idx_record);
+    m_free_indices = (size_type*)idx_record->data();
+
+    for(size_type i = 0; i < num_blocks; ++i) {
+      m_free_indices[i] = i;
+    }
+
+    Kokkos::memory_fence();
+  }
+
+  // For compatibility with MemoryPool<>
+  FixedBlockSizeChaseLevMemoryPool(
+    memory_space const& mem_space,
+    size_t mempool_capacity,
+    unsigned, unsigned, unsigned
+  ) : FixedBlockSizeChaseLevMemoryPool(mem_space, mempool_capacity / actual_size)
+  { /* forwarding ctor, must be empty */ }
+
+  KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeChaseLevMemoryPool() = default;
+  KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeChaseLevMemoryPool(FixedBlockSizeChaseLevMemoryPool&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeChaseLevMemoryPool(FixedBlockSizeChaseLevMemoryPool const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeChaseLevMemoryPool& operator=(FixedBlockSizeChaseLevMemoryPool&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION FixedBlockSizeChaseLevMemoryPool& operator=(FixedBlockSizeChaseLevMemoryPool const&) = default;
+
+
+  KOKKOS_INLINE_FUNCTION
+  void* allocate(size_type alloc_size) const noexcept
+  {
+    KOKKOS_EXPECTS(alloc_size <= Size);
+    auto free_idx_counter = Kokkos::atomic_fetch_add((volatile size_type*)&m_first_free_idx, size_type(1));
+    auto free_idx_idx = free_idx_counter % m_num_blocks;
+
+    // We don't have exclusive access to m_free_indices[free_idx_idx] because
+    // the allocate counter might have lapped us since we incremented it
+    auto current_free_idx = m_free_indices[free_idx_idx];
+    size_type free_idx = IndexInUse;
+    free_idx =
+      Kokkos::atomic_compare_exchange(&m_free_indices[free_idx_idx], current_free_idx, free_idx);
+    Kokkos::memory_fence();
+
+    // TODO figure out how to decrement here?
+
+    if(free_idx == IndexInUse) {
+      return nullptr;
+    }
+    else {
+      return (void*)&m_first_block[free_idx];
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void deallocate(void* ptr, size_type alloc_size) const noexcept
+  {
+    // figure out which block we are
+    auto offset = intptr_t(ptr) - intptr_t(m_first_block);
+
+    KOKKOS_EXPECTS(offset % actual_size == 0 && offset/actual_size < m_num_blocks);
+
+    Kokkos::memory_fence();
+    auto last_idx_idx = Kokkos::atomic_fetch_add((volatile size_type*)&m_last_free_idx, size_type(1));
+    last_idx_idx %= m_num_blocks;
+    m_free_indices[last_idx_idx] = offset / actual_size;
+  }
+
+};
+#endif
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_IMPL_KOKKOS_FIXEDBUFFERMEMORYPOOL_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_FunctorAnalysis.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_FunctorAnalysis.hpp
new file mode 100644 (file)
index 0000000..6569e49
--- /dev/null
@@ -0,0 +1,1001 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_FUNCTORANALYSIS_HPP
+#define KOKKOS_FUNCTORANALYSIS_HPP
+
+#include <cstddef>
+#include <Kokkos_Core_fwd.hpp>
+#include <impl/Kokkos_Traits.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+struct FunctorPatternInterface {
+  struct FOR {};
+  struct REDUCE {};
+  struct SCAN {};
+};
+
+template <typename T>
+struct DeduceFunctorPatternInterface;
+
+template <class FunctorType, class ExecPolicy, class ExecutionSpace>
+struct DeduceFunctorPatternInterface<
+    ParallelFor<FunctorType, ExecPolicy, ExecutionSpace>> {
+  using type = FunctorPatternInterface::FOR;
+};
+
+template <class FunctorType, class ExecPolicy, class ReducerType,
+          class ExecutionSpace>
+struct DeduceFunctorPatternInterface<
+    ParallelReduce<FunctorType, ExecPolicy, ReducerType, ExecutionSpace>> {
+  using type = FunctorPatternInterface::REDUCE;
+};
+
+template <class FunctorType, class ExecPolicy, class ExecutionSpace>
+struct DeduceFunctorPatternInterface<
+    ParallelScan<FunctorType, ExecPolicy, ExecutionSpace>> {
+  using type = FunctorPatternInterface::SCAN;
+};
+
+template <class FunctorType, class ExecPolicy, class ReturnType,
+          class ExecutionSpace>
+struct DeduceFunctorPatternInterface<ParallelScanWithTotal<
+    FunctorType, ExecPolicy, ReturnType, ExecutionSpace>> {
+  using type = FunctorPatternInterface::SCAN;
+};
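+
+/* Editorial illustration of the deduction described below: given a reduction
+ * functor with no explicit value_type,
+ *
+ *   struct Sum {
+ *     KOKKOS_INLINE_FUNCTION
+ *     void operator()(int i, double& update) const { update += i; }
+ *   };
+ *
+ * FunctorAnalysis deduces value_type = double from the trailing reference
+ * argument of operator(), yielding reference_type = double& and
+ * pointer_type = double*.
+ */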
+
+/** \brief  Query Functor and execution policy argument tag for value type.
+ *
+ *  If 'value_type' is not explicitly declared in the functor
+ *  then attempt to deduce the type from FunctorType::operator()
+ *  interface used by the pattern and policy.
+ *
+ *  For the REDUCE pattern generate a Reducer and finalization function
+ *  derived from what is available within the functor.
+ */
+template <typename PatternInterface, class Policy, class Functor>
+struct FunctorAnalysis {
+ private:
+  using FOR    = FunctorPatternInterface::FOR;
+  using REDUCE = FunctorPatternInterface::REDUCE;
+  using SCAN   = FunctorPatternInterface::SCAN;
+
+  //----------------------------------------
+
+  struct void_tag {};
+
+  template <typename P = Policy, typename = std::false_type>
+  struct has_work_tag {
+    using type = void;
+    using wtag = void_tag;
+  };
+
+  template <typename P>
+  struct has_work_tag<P, typename std::is_void<typename P::work_tag>::type> {
+    using type = typename P::work_tag;
+    using wtag = typename P::work_tag;
+  };
+
+  using Tag  = typename has_work_tag<>::type;
+  using WTag = typename has_work_tag<>::wtag;
+
+  //----------------------------------------
+  // Check for T::execution_space
+
+  template <typename T, typename = std::false_type>
+  struct has_execution_space {
+    using type = void;
+    enum : bool { value = false };
+  };
+
+  template <typename T>
+  struct has_execution_space<
+      T, typename std::is_void<typename T::execution_space>::type> {
+    using type = typename T::execution_space;
+    enum : bool { value = true };
+  };
+
+  using policy_has_space  = has_execution_space<Policy>;
+  using functor_has_space = has_execution_space<Functor>;
+
+  static_assert(!policy_has_space::value || !functor_has_space::value ||
+                    std::is_same<typename policy_has_space::type,
+                                 typename functor_has_space::type>::value,
+                "Execution Policy and Functor execution space must match");
+
+  //----------------------------------------
+  // Check for Functor::value_type, which is either a simple type T or T[]
+
+  template <typename F, typename = std::false_type>
+  struct has_value_type {
+    using type = void;
+  };
+
+  template <typename F>
+  struct has_value_type<F,
+                        typename std::is_void<typename F::value_type>::type> {
+    using type = typename F::value_type;
+
+    static_assert(!std::is_reference<type>::value &&
+                      std::rank<type>::value <= 1 &&
+                      std::extent<type>::value == 0,
+                  "Kokkos Functor::value_type is T or T[]");
+  };
+
+  //----------------------------------------
+  // If Functor::value_type does not exist then evaluate operator(),
+  // depending upon the pattern and whether the policy has a work tag,
+  // to determine the reduction or scan value_type.
+
+  template <typename F, typename P = PatternInterface,
+            typename V = typename has_value_type<F>::type,
+            bool T     = std::is_void<Tag>::value>
+  struct deduce_value_type {
+    using type = V;
+  };
+
+  template <typename F>
+  struct deduce_value_type<F, REDUCE, void, true> {
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(M, A&) const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(M, M, A&) const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(M, M, M, A&)
+                                               const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(M, M, M, M, A&)
+                                               const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(M, M, M, M, M, A&)
+                                               const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(M, M, M, M, M, M,
+                                                             A&) const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(M, M, M, M, M, M,
+                                                             M, A&) const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(M, M, M, M, M, M,
+                                                             M, M, A&) const);
+
+    using type = decltype(deduce(&F::operator()));
+  };
+
+  template <typename F>
+  struct deduce_value_type<F, REDUCE, void, false> {
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag, M, A&)
+                                               const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag, M, M, A&)
+                                               const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag, M, M, M, A&)
+                                               const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag, M, M, M, M,
+                                                             A&) const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag, M, M, M, M,
+                                                             M, A&) const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag, M, M, M, M,
+                                                             M, M, A&) const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag, M, M, M, M,
+                                                             M, M, M, A&)
+                                               const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag, M, M, M, M,
+                                                             M, M, M, M, A&)
+                                               const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag const&, M, A&)
+                                               const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag const&, M, M,
+                                                             A&) const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag const&, M, M,
+                                                             M, A&) const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag const&, M, M,
+                                                             M, M, A&) const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag const&, M, M,
+                                                             M, M, M, A&)
+                                               const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag const&, M, M,
+                                                             M, M, M, M, A&)
+                                               const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag const&, M, M,
+                                                             M, M, M, M, M, A&)
+                                               const);
+
+    template <typename M, typename A>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag const&, M, M,
+                                                             M, M, M, M, M, M,
+                                                             A&) const);
+
+    using type = decltype(deduce(&F::operator()));
+  };
+
+  template <typename F>
+  struct deduce_value_type<F, SCAN, void, true> {
+    template <typename M, typename A, typename I>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(M, A&, I) const);
+
+    using type = decltype(deduce(&F::operator()));
+  };
+
+  template <typename F>
+  struct deduce_value_type<F, SCAN, void, false> {
+    template <typename M, typename A, typename I>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag, M, A&, I)
+                                               const);
+
+    template <typename M, typename A, typename I>
+    KOKKOS_INLINE_FUNCTION static A deduce(void (Functor::*)(WTag const&, M, A&,
+                                                             I) const);
+
+    using type = decltype(deduce(&F::operator()));
+  };
+
+  //----------------------------------------
+
+  using candidate_type = typename deduce_value_type<Functor>::type;
+
+  enum {
+    candidate_is_void  = std::is_void<candidate_type>::value,
+    candidate_is_array = std::rank<candidate_type>::value == 1
+  };
+
+  //----------------------------------------
+
+ public:
+  using execution_space =
+      std::conditional_t<functor_has_space::value,
+                         typename functor_has_space::type,
+                         std::conditional_t<policy_has_space::value,
+                                            typename policy_has_space::type,
+                                            Kokkos::DefaultExecutionSpace>>;
+
+  using value_type = std::remove_extent_t<candidate_type>;
+
+  static_assert(!std::is_const<value_type>::value,
+                "Kokkos functor operator reduce argument cannot be const");
+
+ private:
+  // Stub to avoid defining a type 'void &'
+  using ValueType = std::conditional_t<candidate_is_void, void_tag, value_type>;
+
+ public:
+  using pointer_type = std::conditional_t<candidate_is_void, void, ValueType*>;
+
+  using reference_type = std::conditional_t<
+      candidate_is_array, ValueType*,
+      std::conditional_t<!candidate_is_void, ValueType&, void>>;
+
+ private:
+  template <bool IsArray, class FF>
+  KOKKOS_INLINE_FUNCTION static constexpr std::enable_if_t<IsArray, unsigned>
+  get_length(FF const& f) {
+    return f.value_count;
+  }
+
+  template <bool IsArray, class FF>
+  KOKKOS_INLINE_FUNCTION static constexpr std::enable_if_t<!IsArray, unsigned>
+  get_length(FF const&) {
+    return candidate_is_void ? 0 : 1;
+  }
+
+ public:
+  enum {
+    StaticValueSize =
+        !candidate_is_void && !candidate_is_array ? sizeof(ValueType) : 0
+  };
+
+  KOKKOS_FORCEINLINE_FUNCTION static constexpr unsigned value_count(
+      const Functor& f) {
+    return FunctorAnalysis::template get_length<candidate_is_array>(f);
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION static constexpr unsigned value_size(
+      const Functor& f) {
+    return FunctorAnalysis::template get_length<candidate_is_array>(f) *
+           sizeof(ValueType);
+  }
+
+  //----------------------------------------
+
+  template <class Unknown>
+  KOKKOS_FORCEINLINE_FUNCTION static constexpr unsigned value_count(
+      const Unknown&) {
+    return candidate_is_void ? 0 : 1;
+  }
+
+  template <class Unknown>
+  KOKKOS_FORCEINLINE_FUNCTION static constexpr unsigned value_size(
+      const Unknown&) {
+    return candidate_is_void ? 0 : sizeof(ValueType);
+  }
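+
+  // Editor's note (illustration, not upstream code): for a hypothetical
+  // scalar functor declaring
+  //   void operator()(int i, double& sum) const;
+  // candidate_type is double, so value_count(f) == 1 and
+  // value_size(f) == sizeof(double). For a hypothetical array functor
+  // declaring
+  //   using value_type = double[];
+  //   int value_count = 3;
+  // candidate_type has rank one, so value_count(f) reads f.value_count and
+  // value_size(f) == 3 * sizeof(double).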
+
+ private:
+  //----------------------------------------
+  // parallel_reduce join operator
+
+  template <class F, bool is_array = candidate_is_array>
+  struct has_join_no_tag_function;
+
+  template <class F>
+  struct has_join_no_tag_function<F, /*is_array*/ false> {
+    using ref_type  = ValueType&;
+    using cref_type = const ValueType&;
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(ref_type,
+                                                             cref_type) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(ref_type, cref_type));
+
+    KOKKOS_INLINE_FUNCTION static void join(F const* const f, ValueType* dst,
+                                            ValueType const* src) {
+      f->join(*dst, *src);
+    }
+  };
+
+  template <class F>
+  struct has_join_no_tag_function<F, /*is_array*/ true> {
+    using ref_type  = ValueType*;
+    using cref_type = const ValueType*;
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(ref_type,
+                                                             cref_type) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(ref_type, cref_type));
+
+    KOKKOS_INLINE_FUNCTION static void join(F const* const f, ValueType* dst,
+                                            ValueType const* src) {
+      f->join(dst, src);
+    }
+  };
+
+  template <class F, bool is_array = candidate_is_array>
+  struct has_volatile_join_no_tag_function;
+
+  template <class F>
+  struct KOKKOS_DEPRECATED_WITH_COMMENT(
+      "Reduce/scan join() taking `volatile`-qualified parameters is "
+      "deprecated. Remove the `volatile` qualifier.")
+      has_volatile_join_no_tag_function<F, /*is_array*/ false> {
+    using vref_type  = volatile ValueType&;
+    using cvref_type = const volatile ValueType&;
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(vref_type,
+                                                             cvref_type) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(vref_type,
+                                                          cvref_type));
+
+    KOKKOS_INLINE_FUNCTION static void join(F const* const f, ValueType* dst,
+                                            ValueType const* src) {
+      f->join(*dst, *src);
+    }
+  };
+
+  template <class F>
+  struct KOKKOS_DEPRECATED_WITH_COMMENT(
+      "Reduce/scan join() taking `volatile`-qualified parameters is "
+      "deprecated. Remove the `volatile` qualifier.")
+      has_volatile_join_no_tag_function<F, /*is_array*/ true> {
+    using vref_type  = volatile ValueType*;
+    using cvref_type = const volatile ValueType*;
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(vref_type,
+                                                             cvref_type) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(vref_type,
+                                                          cvref_type));
+
+    KOKKOS_INLINE_FUNCTION static void join(F const* const f, ValueType* dst,
+                                            ValueType const* src) {
+      f->join(dst, src);
+    }
+  };
+
+  template <class F, bool is_array = candidate_is_array>
+  struct has_join_tag_function;
+
+  template <class F>
+  struct has_join_tag_function<F, /*is_array*/ false> {
+    using ref_type  = ValueType&;
+    using cref_type = const ValueType&;
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag, ref_type,
+                                                             cref_type) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag, ref_type,
+                                                          cref_type));
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag const&,
+                                                             ref_type,
+                                                             cref_type) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag const&, ref_type,
+                                                          cref_type));
+
+    KOKKOS_INLINE_FUNCTION static void join(F const* const f, ValueType* dst,
+                                            ValueType const* src) {
+      f->join(WTag(), *dst, *src);
+    }
+  };
+
+  template <class F>
+  struct has_join_tag_function<F, /*is_array*/ true> {
+    using ref_type  = ValueType*;
+    using cref_type = const ValueType*;
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag, ref_type,
+                                                             cref_type) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag, ref_type,
+                                                          cref_type));
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag const&,
+                                                             ref_type,
+                                                             cref_type) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag const&, ref_type,
+                                                          cref_type));
+
+    KOKKOS_INLINE_FUNCTION static void join(F const* const f, ValueType* dst,
+                                            ValueType const* src) {
+      f->join(WTag(), dst, src);
+    }
+  };
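+
+  // Editor's note (hypothetical example): given a policy with work tag MyTag,
+  // a functor such as
+  //   struct TaggedMin {
+  //     KOKKOS_INLINE_FUNCTION
+  //     void operator()(MyTag, int i, double& v) const;
+  //     KOKKOS_INLINE_FUNCTION
+  //     void join(MyTag, double& dst, const double& src) const {
+  //       if (src < dst) dst = src;
+  //     }
+  //   };
+  // makes enable_if(&TaggedMin::join) well-formed above, and the reduction
+  // then dispatches through f->join(WTag(), *dst, *src).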
+
+  template <class F, bool is_array = candidate_is_array>
+  struct has_volatile_join_tag_function;
+
+  template <class F>
+  struct KOKKOS_DEPRECATED_WITH_COMMENT(
+      "Reduce/scan join() taking `volatile`-qualified parameters is "
+      "deprecated. Remove the `volatile` qualifier.")
+      has_volatile_join_tag_function<F, /*is_array*/ false> {
+    using vref_type  = volatile ValueType&;
+    using cvref_type = const volatile ValueType&;
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag, vref_type,
+                                                             cvref_type) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag, vref_type,
+                                                          cvref_type));
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag const&,
+                                                             vref_type,
+                                                             cvref_type) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag const&,
+                                                          vref_type,
+                                                          cvref_type));
+
+    KOKKOS_INLINE_FUNCTION static void join(F const* const f, ValueType* dst,
+                                            ValueType const* src) {
+      f->join(WTag(), *dst, *src);
+    }
+  };
+
+  template <class F>
+  struct KOKKOS_DEPRECATED_WITH_COMMENT(
+      "Reduce/scan join() taking `volatile`-qualified parameters is "
+      "deprecated. Remove the `volatile` qualifier.")
+      has_volatile_join_tag_function<F, /*is_array*/ true> {
+    using vref_type  = volatile ValueType*;
+    using cvref_type = const volatile ValueType*;
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag, vref_type,
+                                                             cvref_type) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag, vref_type,
+                                                          cvref_type));
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag const&,
+                                                             vref_type,
+                                                             cvref_type) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag const&,
+                                                          vref_type,
+                                                          cvref_type));
+
+    KOKKOS_INLINE_FUNCTION static void join(F const* const f, ValueType* dst,
+                                            ValueType const* src) {
+      f->join(WTag(), dst, src);
+    }
+  };
+
+  template <class F, class = void>
+  struct detected_join_no_tag {
+    enum : bool { value = false };
+  };
+
+  template <class F>
+  struct detected_join_no_tag<
+      F, decltype(has_join_no_tag_function<F>::enable_if(&F::join))> {
+    enum : bool { value = true };
+  };
+
+  template <class F, class = void>
+  struct detected_volatile_join_no_tag {
+    enum : bool { value = false };
+  };
+
+  template <class F>
+  struct detected_volatile_join_no_tag<
+      F, decltype(has_volatile_join_no_tag_function<F>::enable_if(&F::join))> {
+    enum : bool { value = true };
+  };
+
+  template <class F, class = void>
+  struct detected_join_tag {
+    enum : bool { value = false };
+  };
+
+  template <class F>
+  struct detected_join_tag<F, decltype(has_join_tag_function<F>::enable_if(
+                                  &F::join))> {
+    enum : bool { value = true };
+  };
+
+  template <class F, class = void>
+  struct detected_volatile_join_tag {
+    enum : bool { value = false };
+  };
+
+  template <class F>
+  struct detected_volatile_join_tag<
+      F, decltype(has_volatile_join_tag_function<F>::enable_if(&F::join))> {
+    enum : bool { value = true };
+  };
+
+  template <class F = Functor, typename = void>
+  struct DeduceJoinNoTag {
+    enum : bool { value = false };
+
+    KOKKOS_INLINE_FUNCTION static void join(F const* const f, ValueType* dst,
+                                            ValueType const* src) {
+      const int n = FunctorAnalysis::value_count(*f);
+      for (int i = 0; i < n; ++i) dst[i] += src[i];
+    }
+  };
+
+  template <class F>
+  struct DeduceJoinNoTag<F, std::enable_if_t<(is_reducer<F>::value ||
+                                              (!is_reducer<F>::value &&
+                                               std::is_void<Tag>::value)) &&
+                                             detected_join_no_tag<F>::value>>
+      : public has_join_no_tag_function<F> {
+    enum : bool { value = true };
+  };
+
+  template <class F>
+  struct DeduceJoinNoTag<
+      F,
+      std::enable_if_t<(is_reducer<F>::value ||
+                        (!is_reducer<F>::value && std::is_void<Tag>::value)) &&
+                       (!detected_join_no_tag<F>::value &&
+                        detected_volatile_join_no_tag<F>::value)>>
+      : public has_volatile_join_no_tag_function<F> {
+    enum : bool { value = true };
+  };
+
+  template <class F = Functor, typename = void>
+  struct DeduceJoin : public DeduceJoinNoTag<F> {};
+
+  template <class F>
+  struct DeduceJoin<
+      F, std::enable_if_t<!is_reducer<F>::value && detected_join_tag<F>::value>>
+      : public has_join_tag_function<F> {
+    enum : bool { value = true };
+  };
+
+  template <class F>
+  struct DeduceJoin<F, std::enable_if_t<!is_reducer<F>::value &&
+                                        (!detected_join_tag<F>::value &&
+                                         detected_volatile_join_tag<F>::value)>>
+      : public has_volatile_join_tag_function<F> {
+    enum : bool { value = true };
+  };
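+
+  // Editor's note: when none of the join traits above matches, the primary
+  // DeduceJoinNoTag template is used and combines partial results with the
+  // element-wise sum dst[i] += src[i]. A hypothetical functor providing only
+  //   void operator()(int i, double& sum) const { sum += a(i); }
+  // therefore still reduces correctly without a user-written join().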
+
+  //----------------------------------------
+
+  template <class, bool is_array = candidate_is_array>
+  struct has_init_no_tag_function;
+
+  template <class F>
+  struct has_init_no_tag_function<F, /*is_array*/ false> {
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(ValueType&) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(ValueType&));
+
+    KOKKOS_INLINE_FUNCTION static void init(F const* const f, ValueType* dst) {
+      f->init(*dst);
+    }
+  };
+
+  template <class F>
+  struct has_init_no_tag_function<F, /*is_array*/ true> {
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(ValueType*) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(ValueType*));
+
+    KOKKOS_INLINE_FUNCTION static void init(F const* const f, ValueType* dst) {
+      f->init(dst);
+    }
+  };
+
+  template <class, bool is_array = candidate_is_array>
+  struct has_init_tag_function;
+
+  template <class F>
+  struct has_init_tag_function<F, /*is_array*/ false> {
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag, ValueType&)
+                                                     const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag const&,
+                                                             ValueType&) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag, ValueType&));
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag const&,
+                                                          ValueType&));
+
+    KOKKOS_INLINE_FUNCTION static void init(F const* const f, ValueType* dst) {
+      f->init(WTag(), *dst);
+    }
+  };
+
+  template <class F>
+  struct has_init_tag_function<F, /*is_array*/ true> {
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag, ValueType*)
+                                                     const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag const&,
+                                                             ValueType*) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag, ValueType*));
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag const&,
+                                                          ValueType*));
+
+    KOKKOS_INLINE_FUNCTION static void init(F const* const f, ValueType* dst) {
+      f->init(WTag(), dst);
+    }
+  };
+
+  template <class F = Functor, typename = void>
+  struct DeduceInitNoTag {
+    enum : bool { value = false };
+
+    KOKKOS_INLINE_FUNCTION static void init(F const* const f, ValueType* dst) {
+      const int n = FunctorAnalysis::value_count(*f);
+      for (int i = 0; i < n; ++i) new (&dst[i]) ValueType();
+    }
+  };
+
+  template <class F>
+  struct DeduceInitNoTag<
+      F, std::enable_if_t<is_reducer<F>::value || (!is_reducer<F>::value &&
+                                                   std::is_void<Tag>::value),
+                          decltype(has_init_no_tag_function<F>::enable_if(
+                              &F::init))>>
+      : public has_init_no_tag_function<F> {
+    enum : bool { value = true };
+  };
+
+  template <class F = Functor, typename = void>
+  struct DeduceInit : public DeduceInitNoTag<F> {};
+
+  template <class F>
+  struct DeduceInit<
+      F,
+      std::enable_if_t<!is_reducer<F>::value,
+                       decltype(has_init_tag_function<F>::enable_if(&F::init))>>
+      : public has_init_tag_function<F> {
+    enum : bool { value = true };
+  };
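+
+  // Editor's note: if no init() is detected, the primary DeduceInitNoTag
+  // template value-initializes each element in place, so a hypothetical sum
+  // over double starts from 0.0 without a user-written init(). A custom
+  // identity, e.g. for a min-reduction,
+  //   void init(double& v) const { v = DBL_MAX; }
+  // is picked up through the enable_if overloads above instead.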
+
+  //----------------------------------------
+
+  template <class, bool is_array = candidate_is_array>
+  struct has_final_no_tag_function;
+
+  // No tag, not array
+  template <class F>
+  struct has_final_no_tag_function<F, /*is_array*/ false> {
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(ValueType&) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(ValueType&));
+
+    KOKKOS_INLINE_FUNCTION static void final(F const* const f, ValueType* dst) {
+      f->final(*dst);
+    }
+  };
+
+  // No tag, is array
+  template <class F>
+  struct has_final_no_tag_function<F, /*is_array*/ true> {
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(ValueType*) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(ValueType*));
+
+    KOKKOS_INLINE_FUNCTION static void final(F const* const f, ValueType* dst) {
+      f->final(dst);
+    }
+  };
+
+  template <class, bool is_array = candidate_is_array>
+  struct has_final_tag_function;
+
+  // Has tag, not array
+  template <class F>
+  struct has_final_tag_function<F, /*is_array*/ false> {
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag, ValueType&)
+                                                     const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag const&,
+                                                             ValueType&) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag, ValueType&));
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag const&,
+                                                          ValueType&));
+
+    KOKKOS_INLINE_FUNCTION static void final(F const* const f, ValueType* dst) {
+      f->final(WTag(), *dst);
+    }
+  };
+
+  // Has tag, is array
+  template <class F>
+  struct has_final_tag_function<F, /*is_array*/ true> {
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag, ValueType*)
+                                                     const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (F::*)(WTag const&,
+                                                             ValueType*) const);
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag, ValueType*));
+
+    KOKKOS_INLINE_FUNCTION static void enable_if(void (*)(WTag const&,
+                                                          ValueType*));
+
+    KOKKOS_INLINE_FUNCTION static void final(F const* const f, ValueType* dst) {
+      f->final(WTag(), dst);
+    }
+  };
+
+  template <class F = Functor, typename = void>
+  struct DeduceFinalNoTag {
+    enum : bool { value = false };
+
+    KOKKOS_INLINE_FUNCTION
+    static void final(F const* const, ValueType*) {}
+  };
+
+  template <class F>
+  struct DeduceFinalNoTag<
+      F, std::enable_if_t<is_reducer<F>::value || (!is_reducer<F>::value &&
+                                                   std::is_void<Tag>::value),
+                          decltype(has_final_no_tag_function<F>::enable_if(
+                              &F::final))>>
+      : public has_final_no_tag_function<F> {
+    enum : bool { value = true };
+  };
+
+  template <class F = Functor, typename = void>
+  struct DeduceFinal : public DeduceFinalNoTag<F> {};
+
+  template <class F>
+  struct DeduceFinal<F, std::enable_if_t<!is_reducer<F>::value,
+                                         decltype(has_final_tag_function<
+                                                  F>::enable_if(&F::final))>>
+      : public has_final_tag_function<F> {
+    enum : bool { value = true };
+  };
+
+  //----------------------------------------
+
+  template <class F = Functor, typename = void>
+  struct DeduceTeamShmem {
+    enum : bool { value = false };
+
+    static size_t team_shmem_size(F const*, int) { return 0; }
+  };
+
+  template <class F>
+  struct DeduceTeamShmem<F, std::enable_if_t<0 < sizeof(&F::team_shmem_size)>> {
+    enum : bool { value = true };
+
+    static size_t team_shmem_size(F const* const f, int team_size) {
+      return f->team_shmem_size(team_size);
+    }
+  };
+
+  template <class F>
+  struct DeduceTeamShmem<F,
+                         std::enable_if_t<(0 < sizeof(&F::shmem_size)) &&
+                                          !(0 < sizeof(&F::team_shmem_size))>> {
+    enum : bool { value = true };
+
+    static size_t team_shmem_size(F const* const f, int team_size) {
+      return f->shmem_size(team_size);
+    }
+  };
+
+  //----------------------------------------
+
+ public:
+  inline static size_t team_shmem_size(Functor const& f, int team_size) {
+    return DeduceTeamShmem<>::team_shmem_size(&f, team_size);
+  }
+
+  //----------------------------------------
+
+  enum { has_join_member_function = DeduceJoin<>::value };
+  enum { has_init_member_function = DeduceInit<>::value };
+  enum { has_final_member_function = DeduceFinal<>::value };
+
+  static_assert((Kokkos::is_reducer<Functor>::value &&
+                 has_join_member_function) ||
+                    !Kokkos::is_reducer<Functor>::value,
+                "Reducer must have a join member function!");
+
+  struct Reducer {
+   private:
+    Functor const* const m_functor;
+
+    template <bool IsArray>
+    KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<IsArray, int> len() const
+        noexcept {
+      return m_functor->value_count;
+    }
+
+    template <bool IsArray>
+    KOKKOS_INLINE_FUNCTION constexpr std::enable_if_t<!IsArray, int> len() const
+        noexcept {
+      return candidate_is_void ? 0 : 1;
+    }
+
+   public:
+    using reducer        = Reducer;
+    using value_type     = std::remove_const_t<FunctorAnalysis::value_type>;
+    using pointer_type   = value_type*;
+    using reference_type = FunctorAnalysis::reference_type;
+    using functor_type   = Functor;  // Adapts a functor
+
+    template <bool is_array = candidate_is_array>
+    KOKKOS_INLINE_FUNCTION static std::enable_if_t<is_array, reference_type>
+    reference(ValueType* dst) noexcept {
+      return dst;
+    }
+
+    template <bool is_array = candidate_is_array>
+    KOKKOS_INLINE_FUNCTION static std::enable_if_t<!is_array, reference_type>
+    reference(ValueType* dst) noexcept {
+      return *dst;
+    }
+
+    KOKKOS_INLINE_FUNCTION constexpr int length() const noexcept {
+      return Reducer::template len<candidate_is_array>();
+    }
+
+    KOKKOS_INLINE_FUNCTION
+    void copy(ValueType* const dst, ValueType const* const src) const noexcept {
+      for (int i = 0; i < Reducer::template len<candidate_is_array>(); ++i)
+        dst[i] = src[i];
+    }
+
+    KOKKOS_INLINE_FUNCTION
+    void join(ValueType* dst, ValueType const* src) const noexcept {
+      DeduceJoin<>::join(m_functor, dst, src);
+    }
+
+    KOKKOS_INLINE_FUNCTION reference_type init(ValueType* const dst) const
+        noexcept {
+      DeduceInit<>::init(m_functor, dst);
+      return reference(dst);
+    }
+
+    KOKKOS_INLINE_FUNCTION
+    void final(ValueType* dst) const noexcept {
+      DeduceFinal<>::final(m_functor, dst);
+    }
+
+    Reducer(Reducer const&) = default;
+    Reducer(Reducer&&)      = default;
+    Reducer& operator=(Reducer const&) = delete;
+    Reducer& operator=(Reducer&&) = delete;
+    ~Reducer()                    = default;
+
+    KOKKOS_INLINE_FUNCTION explicit constexpr Reducer(
+        Functor const* arg_functor) noexcept
+        : m_functor(arg_functor) {}
+  };
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* KOKKOS_FUNCTORANALYSIS_HPP */
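[Editor's note] The header above discovers, purely through overload resolution, which reduction hooks a user functor provides. A minimal sketch of a functor exercising all of them (the name ColumnSums and its members are illustrative, not part of the bundled sources):

```cpp
#include <Kokkos_Core.hpp>

// Hypothetical array-reduction functor: each member below is one of the
// hooks FunctorAnalysis probes for (value_type, value_count, operator(),
// join, init, final).
struct ColumnSums {
  using value_type = double[];  // rank-one value_type => array reduction
  int value_count;              // runtime length, read via get_length()
  Kokkos::View<const double**> a;

  KOKKOS_INLINE_FUNCTION
  void operator()(int i, double* sums) const {
    for (int j = 0; j < value_count; ++j) sums[j] += a(i, j);
  }
  KOKKOS_INLINE_FUNCTION  // detected by has_join_no_tag_function
  void join(double* dst, const double* src) const {
    for (int j = 0; j < value_count; ++j) dst[j] += src[j];
  }
  KOKKOS_INLINE_FUNCTION  // detected by DeduceInit
  void init(double* dst) const {
    for (int j = 0; j < value_count; ++j) dst[j] = 0.0;
  }
  KOKKOS_INLINE_FUNCTION  // detected by DeduceFinal
  void final(double*) const {}
};
```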
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphImpl.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphImpl.hpp
new file mode 100644 (file)
index 0000000..9bf9e29
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_KOKKOS_GRAPHIMPL_HPP
+#define KOKKOS_IMPL_KOKKOS_GRAPHIMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Graph_fwd.hpp>
+
+#include <Kokkos_Concepts.hpp>  // is_execution_policy
+#include <Kokkos_PointerOwnership.hpp>
+#include <impl/Kokkos_GraphImpl_fwd.hpp>
+
+#include <memory>  // std::make_shared
+
+namespace Kokkos {
+namespace Impl {
+
+struct GraphAccess {
+  template <class ExecutionSpace>
+  static Kokkos::Experimental::Graph<ExecutionSpace> construct_graph(
+      ExecutionSpace ex) {
+    //----------------------------------------//
+    return Kokkos::Experimental::Graph<ExecutionSpace>{
+        std::make_shared<GraphImpl<ExecutionSpace>>(std::move(ex))};
+    //----------------------------------------//
+  }
+  template <class ExecutionSpace>
+  static auto create_root_ref(
+      Kokkos::Experimental::Graph<ExecutionSpace>& arg_graph) {
+    auto const& graph_impl_ptr = arg_graph.m_impl_ptr;
+
+    auto root_ptr = graph_impl_ptr->create_root_node_ptr();
+
+    return Kokkos::Experimental::GraphNodeRef<ExecutionSpace>{
+        graph_impl_ptr, std::move(root_ptr)};
+  }
+
+  template <class NodeType, class... Args>
+  static auto make_node_shared_ptr(Args&&... args) {
+    static_assert(
+        Kokkos::Impl::is_specialization_of<NodeType, GraphNodeImpl>::value,
+        "Kokkos Internal Error in graph interface");
+    return std::make_shared<NodeType>((Args &&) args...);
+  }
+
+  template <class GraphImplWeakPtr, class ExecutionSpace, class Kernel,
+            class Predecessor>
+  static auto make_graph_node_ref(
+      GraphImplWeakPtr graph_impl,
+      std::shared_ptr<
+          Kokkos::Impl::GraphNodeImpl<ExecutionSpace, Kernel, Predecessor>>
+          pred_impl) {
+    //----------------------------------------
+    return Kokkos::Experimental::GraphNodeRef<ExecutionSpace, Kernel,
+                                              Predecessor>{
+        std::move(graph_impl), std::move(pred_impl)};
+    //----------------------------------------
+  }
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="accessors for private members of public interface"> {{{2
+
+  template <class NodeRef>
+  static auto get_node_ptr(NodeRef&& node_ref) {
+    static_assert(
+        is_specialization_of<remove_cvref_t<NodeRef>,
+                             Kokkos::Experimental::GraphNodeRef>::value,
+        "Kokkos Internal Implementation error (bad argument to "
+        "`GraphAccess::get_node_ptr()`)");
+    return ((NodeRef &&) node_ref).get_node_ptr();
+  }
+
+  template <class NodeRef>
+  static auto get_graph_weak_ptr(NodeRef&& node_ref) {
+    static_assert(
+        is_specialization_of<remove_cvref_t<NodeRef>,
+                             Kokkos::Experimental::GraphNodeRef>::value,
+        "Kokkos Internal Implementation error (bad argument to "
+        "`GraphAccess::get_graph_weak_ptr()`)");
+    return ((NodeRef &&) node_ref).get_graph_weak_ptr();
+  }
+
+  // </editor-fold> end accessors for private members of public interface }}}2
+  //----------------------------------------------------------------------------
+};
+
+template <class Policy>
+struct _add_graph_kernel_tag;
+
+template <template <class...> class PolicyTemplate, class... PolicyTraits>
+struct _add_graph_kernel_tag<PolicyTemplate<PolicyTraits...>> {
+  using type = PolicyTemplate<PolicyTraits..., IsGraphKernelTag>;
+};
+
+}  // end namespace Impl
+
+namespace Experimental {  // public namespace, but this overload is internal
+
+template <class Policy>
+// requires ExecutionPolicy<Policy>
+constexpr auto require(Policy const& policy,
+                       Kokkos::Impl::KernelInGraphProperty) {
+  static_assert(Kokkos::is_execution_policy<Policy>::value,
+                "Internal implementation error!");
+  return typename Kokkos::Impl::_add_graph_kernel_tag<Policy>::type{policy};
+}
+
+}  // end namespace Experimental
+
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_IMPL_KOKKOS_GRAPHIMPL_HPP
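[Editor's note] GraphAccess::construct_graph() and create_root_ref() above are the internal entry points behind the public graph interface. A hedged sketch of how that interface is driven from user code (N and the lambda bodies are placeholders; create_graph, then_parallel_for, and submit belong to the Kokkos::Experimental graph API shipped with 3.7):

```cpp
#include <Kokkos_Core.hpp>
#include <Kokkos_Graph.hpp>

void build_and_run(int N) {
  // create_graph() calls GraphAccess::construct_graph() and hands the root
  // node (from create_root_ref()) to the closure.
  auto graph = Kokkos::Experimental::create_graph([&](auto root) {
    auto n1 = root.then_parallel_for(N, KOKKOS_LAMBDA(int) { /* stage 1 */ });
    n1.then_parallel_for(N, KOKKOS_LAMBDA(int) { /* stage 2, after n1 */ });
  });
  graph.submit();  // launch the whole DAG on the default execution space
  Kokkos::fence();
}
```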
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphImpl_Utilities.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphImpl_Utilities.hpp
new file mode 100644 (file)
index 0000000..109d37a
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_GRAPHIMPL_UTILITIES_HPP
+#define KOKKOS_KOKKOS_GRAPHIMPL_UTILITIES_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Graph_fwd.hpp>
+
+#include <type_traits>
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="is_compatible_type_erasure"> {{{1
+
+template <class Src, class Dst, class Enable = void>
+struct is_compatible_type_erasure : std::false_type {};
+
+template <class T>
+struct is_compatible_type_erasure<T, Kokkos::Experimental::TypeErasedTag>
+    : std::true_type {};
+
+template <>
+struct is_compatible_type_erasure<Kokkos::Experimental::TypeErasedTag,
+                                  Kokkos::Experimental::TypeErasedTag>
+    : std::true_type {};
+
+template <class T>
+struct is_compatible_type_erasure<T, T> : std::true_type {};
+
+// So there are a couple of ways we could do this, but I didn't want to set up
+// all of the machinery to do a lazy instantiation of the convertibility
+// condition in the converting constructor of GraphNodeRef, so I'm going with
+// this for now:
+// TODO @desul-integration make this variadic once we have a meta-conjunction
+template <template <class, class, class> class Template, class TSrc, class USrc,
+          class VSrc, class TDst, class UDst, class VDst>
+struct is_compatible_type_erasure<
+    Template<TSrc, USrc, VSrc>, Template<TDst, UDst, VDst>,
+    // Because gcc thinks this is ambiguous, we need to add this:
+    std::enable_if_t<!std::is_same<TSrc, TDst>::value ||
+                     !std::is_same<USrc, UDst>::value ||
+                     !std::is_same<VSrc, VDst>::value>>
+    : std::integral_constant<
+          bool, is_compatible_type_erasure<TSrc, TDst>::value &&
+                    is_compatible_type_erasure<USrc, UDst>::value &&
+                    is_compatible_type_erasure<VSrc, VDst>::value> {};
+
+// </editor-fold> end is_compatible_type_erasure }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="is_more_type_erased"> {{{1
+
+template <class T, class U>
+struct is_more_type_erased : std::false_type {};
+
+template <class T>
+struct is_more_type_erased<Kokkos::Experimental::TypeErasedTag, T>
+    : std::true_type {};
+
+template <>
+struct is_more_type_erased<Kokkos::Experimental::TypeErasedTag,
+                           Kokkos::Experimental::TypeErasedTag>
+    : std::false_type {};
+
+// TODO @desul-integration variadic version of this, like the above
+
+// </editor-fold> end is_more_type_erased }}}1
+//==============================================================================
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_GRAPHIMPL_UTILITIES_HPP
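[Editor's note] The compatibility relation above is deliberately directional: erasing type information is allowed, recovering it is not. A few static_asserts that follow directly from the specializations (MyKernel is a placeholder; the internal header above is assumed to be on the include path):

```cpp
#include <impl/Kokkos_GraphImpl_Utilities.hpp>

struct MyKernel {};  // placeholder type

using Kokkos::Experimental::TypeErasedTag;
using Kokkos::Impl::is_compatible_type_erasure;

// A concrete type may convert to the erased tag...
static_assert(is_compatible_type_erasure<MyKernel, TypeErasedTag>::value, "");
// ...and trivially to itself,
static_assert(is_compatible_type_erasure<MyKernel, MyKernel>::value, "");
// but never back from erased to concrete (falls through to the primary).
static_assert(!is_compatible_type_erasure<TypeErasedTag, MyKernel>::value, "");
```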
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphImpl_fwd.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphImpl_fwd.hpp
new file mode 100644 (file)
index 0000000..37c5307
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_KOKKOS_GRAPHIMPL_FWD_HPP
+#define KOKKOS_IMPL_KOKKOS_GRAPHIMPL_FWD_HPP
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class ExecutionSpace, class Kernel, class Predecessor>
+struct GraphNodeImpl;
+
+template <class ExecutionSpace>
+struct GraphImpl;
+
+template <class ExecutionSpace, class Policy, class Functor,
+          class KernelTypeTag, class... Args>
+class GraphNodeKernelImpl;
+
+struct _graph_node_kernel_ctor_tag {};
+struct _graph_node_predecessor_ctor_tag {};
+struct _graph_node_is_root_ctor_tag {};
+
+struct GraphAccess;
+
+// Customizable for backends
+template <class ExecutionSpace>
+struct GraphNodeBackendSpecificDetails;
+
+// Customizable for backends
+template <class ExecutionSpace, class Kernel, class PredecessorRef>
+struct GraphNodeBackendDetailsBeforeTypeErasure;
+
+// TODO move this to a more appropriate place
+struct DoNotExplicitlySpecifyThisTemplateParameter;
+
+struct KernelInGraphProperty {};
+
+struct IsGraphKernelTag {};
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_IMPL_KOKKOS_GRAPHIMPL_FWD_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphNodeCustomization.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphNodeCustomization.hpp
new file mode 100644 (file)
index 0000000..fc75f94
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_KOKKOS_GRAPHNODECUSTOMIZATION_HPP
+#define KOKKOS_IMPL_KOKKOS_GRAPHNODECUSTOMIZATION_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Graph_fwd.hpp>
+#include <impl/Kokkos_GraphImpl_fwd.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// Customizable for backends
+template <class ExecutionSpace, class Kernel, class PredecessorRef>
+struct GraphNodeBackendDetailsBeforeTypeErasure {
+ protected:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="ctors, destructor, and assignment"> {{{2
+
+  // Required constructors in customizations:
+  GraphNodeBackendDetailsBeforeTypeErasure(
+      ExecutionSpace const&, Kernel&, PredecessorRef const&,
+      GraphNodeBackendSpecificDetails<ExecutionSpace>&
+      /* this_as_details */) noexcept {}
+  GraphNodeBackendDetailsBeforeTypeErasure(
+      ExecutionSpace const&, _graph_node_is_root_ctor_tag,
+      GraphNodeBackendSpecificDetails<ExecutionSpace>&
+      /* this_as_details */) noexcept {}
+
+  // Not copyable or movable at the concept level, so the default
+  // implementation shouldn't be either.
+  GraphNodeBackendDetailsBeforeTypeErasure() = delete;
+
+  GraphNodeBackendDetailsBeforeTypeErasure(
+      GraphNodeBackendDetailsBeforeTypeErasure const&) = delete;
+
+  GraphNodeBackendDetailsBeforeTypeErasure(
+      GraphNodeBackendDetailsBeforeTypeErasure&&) = delete;
+
+  GraphNodeBackendDetailsBeforeTypeErasure& operator=(
+      GraphNodeBackendDetailsBeforeTypeErasure const&) = delete;
+
+  GraphNodeBackendDetailsBeforeTypeErasure& operator=(
+      GraphNodeBackendDetailsBeforeTypeErasure&&) = delete;
+
+  ~GraphNodeBackendDetailsBeforeTypeErasure() = default;
+
+  // </editor-fold> end ctors, destructor, and assignment }}}2
+  //----------------------------------------------------------------------------
+};
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_IMPL_KOKKOS_GRAPHNODECUSTOMIZATION_HPP
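[Editor's note] Backends opt into graph support by specializing the class above on their execution space and mirroring its two required constructors. A minimal skeleton under stated assumptions (MyBackendSpace is a hypothetical execution space; real backends, e.g. CUDA, store their node handles in such a specialization):

```cpp
#include <impl/Kokkos_GraphImpl_fwd.hpp>

struct MyBackendSpace;  // hypothetical execution space

namespace Kokkos {
namespace Impl {

template <class Kernel, class PredecessorRef>
struct GraphNodeBackendDetailsBeforeTypeErasure<MyBackendSpace, Kernel,
                                                PredecessorRef> {
 protected:
  // Required constructor: kernel-and-predecessor node
  GraphNodeBackendDetailsBeforeTypeErasure(
      MyBackendSpace const&, Kernel&, PredecessorRef const&,
      GraphNodeBackendSpecificDetails<MyBackendSpace>&) noexcept {}
  // Required constructor: root node
  GraphNodeBackendDetailsBeforeTypeErasure(
      MyBackendSpace const&, _graph_node_is_root_ctor_tag,
      GraphNodeBackendSpecificDetails<MyBackendSpace>&) noexcept {}
};

}  // namespace Impl
}  // namespace Kokkos
```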
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphNodeImpl.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_GraphNodeImpl.hpp
new file mode 100644 (file)
index 0000000..2515995
--- /dev/null
@@ -0,0 +1,298 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_GRAPHNODEIMPL_HPP
+#define KOKKOS_IMPL_GRAPHNODEIMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Graph_fwd.hpp>
+
+#include <impl/Kokkos_SimpleTaskScheduler.hpp>  // ExecutionSpaceInstanceStorage
+#include <impl/Kokkos_GraphImpl.hpp>
+#include <impl/Kokkos_GraphNodeCustomization.hpp>
+
+#include <impl/Kokkos_EBO.hpp>
+
+#include <memory>
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="Fully type-erased GraphNodeImpl"> {{{1
+
+// Base specialization for the case where both the kernel and the predecessor
+// type information is type-erased
+template <class ExecutionSpace>
+struct GraphNodeImpl<ExecutionSpace, Kokkos::Experimental::TypeErasedTag,
+                     Kokkos::Experimental::TypeErasedTag>
+    : GraphNodeBackendSpecificDetails<ExecutionSpace>,
+      ExecutionSpaceInstanceStorage<ExecutionSpace> {
+ public:
+  using node_ref_t =
+      Kokkos::Experimental::GraphNodeRef<ExecutionSpace,
+                                         Kokkos::Experimental::TypeErasedTag,
+                                         Kokkos::Experimental::TypeErasedTag>;
+
+ protected:
+  using implementation_base_t = GraphNodeBackendSpecificDetails<ExecutionSpace>;
+  using execution_space_storage_base_t =
+      ExecutionSpaceInstanceStorage<ExecutionSpace>;
+
+ public:
+  virtual ~GraphNodeImpl() = default;
+
+ protected:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="protected ctors and destructors"> {{{2
+
+  explicit GraphNodeImpl(ExecutionSpace const& ex) noexcept
+      : implementation_base_t(), execution_space_storage_base_t(ex) {}
+
+  // </editor-fold> end protected ctors and destructors }}}2
+  //----------------------------------------------------------------------------
+
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="public(-ish) constructors"> {{{2
+
+  template <class... Args>
+  GraphNodeImpl(ExecutionSpace const& ex, _graph_node_is_root_ctor_tag,
+                Args&&... args) noexcept
+      : implementation_base_t(_graph_node_is_root_ctor_tag{},
+                              (Args &&) args...),
+        execution_space_storage_base_t(ex) {}
+
+  // </editor-fold> end public(-ish) constructors }}}2
+  //----------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="no other constructors"> {{{2
+
+  GraphNodeImpl()                     = delete;
+  GraphNodeImpl(GraphNodeImpl const&) = delete;
+  GraphNodeImpl(GraphNodeImpl&&)      = delete;
+  GraphNodeImpl& operator=(GraphNodeImpl const&) = delete;
+  GraphNodeImpl& operator=(GraphNodeImpl&&) = delete;
+
+  // </editor-fold> end no other constructors }}}2
+  //----------------------------------------------------------------------------
+
+  ExecutionSpace const& execution_space_instance() const {
+    return this->execution_space_storage_base_t::execution_space_instance();
+  }
+};
+
+// </editor-fold> end Fully type-erased GraphNodeImpl }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="Type-erased predecessor GraphNodeImpl"> {{{1
+
+// Specialization for the case with the concrete type of the kernel, but the
+// predecessor erased.
+template <class ExecutionSpace, class Kernel>
+struct GraphNodeImpl<ExecutionSpace, Kernel,
+                     Kokkos::Experimental::TypeErasedTag>
+    : GraphNodeImpl<ExecutionSpace, Kokkos::Experimental::TypeErasedTag,
+                    Kokkos::Experimental::TypeErasedTag> {
+ private:
+  using base_t =
+      GraphNodeImpl<ExecutionSpace, Kokkos::Experimental::TypeErasedTag,
+                    Kokkos::Experimental::TypeErasedTag>;
+
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="public member types"> {{{2
+
+  using node_ref_t =
+      Kokkos::Experimental::GraphNodeRef<ExecutionSpace, Kernel,
+                                         Kokkos::Experimental::TypeErasedTag>;
+  using kernel_type = Kernel;
+
+  // </editor-fold> end public member types }}}2
+  //----------------------------------------------------------------------------
+
+ private:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="private data members"> {{{2
+
+  Kernel m_kernel;
+
+  // </editor-fold> end private data members }}}2
+  //----------------------------------------------------------------------------
+
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Ctors, destructors, and assignment"> {{{2
+
+  template <class KernelDeduced>
+  GraphNodeImpl(ExecutionSpace const& ex, _graph_node_kernel_ctor_tag,
+                KernelDeduced&& arg_kernel)
+      : base_t(ex), m_kernel((KernelDeduced &&) arg_kernel) {}
+
+  template <class... Args>
+  GraphNodeImpl(ExecutionSpace const& ex, _graph_node_is_root_ctor_tag,
+                Args&&... args)
+      : base_t(ex, _graph_node_is_root_ctor_tag{}, (Args &&) args...) {}
+
+  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+  // <editor-fold desc="Rule of 6 for not copyable or movable"> {{{3
+
+  // Not copyable or movable
+  GraphNodeImpl()                     = delete;
+  GraphNodeImpl(GraphNodeImpl const&) = delete;
+  GraphNodeImpl(GraphNodeImpl&&)      = delete;
+  GraphNodeImpl& operator=(GraphNodeImpl const&) = delete;
+  GraphNodeImpl& operator=(GraphNodeImpl&&) = delete;
+  ~GraphNodeImpl() override                 = default;
+
+  // </editor-fold> end Rule of 6 for not copyable or movable }}}3
+  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+  // </editor-fold> end Ctors, destructors, and assignment }}}2
+  //----------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="member accessors"> {{{2
+
+  // Reference qualified to prevent dangling reference to data member
+  Kernel& get_kernel() & { return m_kernel; }
+  Kernel const& get_kernel() const& { return m_kernel; }
+  Kernel&& get_kernel() && = delete;
+
+  // </editor-fold> end member accessors }}}2
+  //----------------------------------------------------------------------------
+};
+
+// </editor-fold> end Type-erased predecessor GraphNodeImpl }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="Fully concrete GraphNodeImpl"> {{{1
+
+// Specialization for the case where nothing is type-erased
+template <class ExecutionSpace, class Kernel, class PredecessorRef>
+struct GraphNodeImpl
+    : GraphNodeImpl<ExecutionSpace, Kernel,
+                    Kokkos::Experimental::TypeErasedTag>,
+      GraphNodeBackendDetailsBeforeTypeErasure<ExecutionSpace, Kernel,
+                                               PredecessorRef> {
+ private:
+  using base_t = GraphNodeImpl<ExecutionSpace, Kernel,
+                               Kokkos::Experimental::TypeErasedTag>;
+  using backend_details_base_t =
+      GraphNodeBackendDetailsBeforeTypeErasure<ExecutionSpace, Kernel,
+                                               PredecessorRef>;
+  // The fully type-erased base type, for the destroy function
+  using type_erased_base_t =
+      GraphNodeImpl<ExecutionSpace, Kokkos::Experimental::TypeErasedTag,
+                    Kokkos::Experimental::TypeErasedTag>;
+
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="public data members"> {{{2
+
+  using node_ref_t = Kokkos::Experimental::GraphNodeRef<ExecutionSpace, Kernel,
+                                                        PredecessorRef>;
+
+  // </editor-fold> end public data members }}}2
+  //----------------------------------------------------------------------------
+
+ private:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="private data members"> {{{2
+
+  PredecessorRef m_predecessor_ref;
+
+  // </editor-fold> end private data members }}}2
+  //----------------------------------------------------------------------------
+
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Ctors, destructors, and assignment"> {{{2
+
+  // Not copyable or movable
+  GraphNodeImpl()                     = delete;
+  GraphNodeImpl(GraphNodeImpl const&) = delete;
+  GraphNodeImpl(GraphNodeImpl&&)      = delete;
+  GraphNodeImpl& operator=(GraphNodeImpl const&) = delete;
+  GraphNodeImpl& operator=(GraphNodeImpl&&) = delete;
+  ~GraphNodeImpl() override                 = default;
+
+  // Normal kernel-and-predecessor constructor
+  template <class KernelDeduced, class PredecessorPtrDeduced>
+  GraphNodeImpl(ExecutionSpace const& ex, _graph_node_kernel_ctor_tag,
+                KernelDeduced&& arg_kernel, _graph_node_predecessor_ctor_tag,
+                PredecessorPtrDeduced&& arg_predecessor)
+      : base_t(ex, _graph_node_kernel_ctor_tag{},
+               (KernelDeduced &&) arg_kernel),
+        // The backend gets the ability to store (weak, non-owning) references
+        // to the kernel in its final resting place here if it wants. The
+        // predecessor is already a pointer, so it doesn't matter that it isn't
+        // already at its final address.
+        backend_details_base_t(ex, this->base_t::get_kernel(), arg_predecessor,
+                               *this),
+        m_predecessor_ref((PredecessorPtrDeduced &&) arg_predecessor) {}
+
+  // Root-tagged constructor
+  template <class... Args>
+  GraphNodeImpl(ExecutionSpace const& ex, _graph_node_is_root_ctor_tag,
+                Args&&... args)
+      : base_t(ex, _graph_node_is_root_ctor_tag{}, (Args &&) args...),
+        backend_details_base_t(ex, _graph_node_is_root_ctor_tag{}, *this),
+        m_predecessor_ref() {}
+
+  // </editor-fold> end Ctors, destructors, and assignment }}}2
+  //------------------------------------------------------------------------------
+};
+
+// </editor-fold> end Fully concrete GraphNodeImpl }}}1
+//==============================================================================
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_IMPL_GRAPHNODEIMPL_HPP
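[Editor's note] The three specializations above form a public inheritance chain from fully concrete to fully type-erased, which is what lets a graph own heterogeneous nodes through one base type. A small sketch (Exec, Kernel, and Pred are placeholders):

```cpp
#include <memory>
#include <impl/Kokkos_GraphNodeImpl.hpp>

template <class Exec, class Kernel, class Pred>
void erase_example(
    std::shared_ptr<Kokkos::Impl::GraphNodeImpl<Exec, Kernel, Pred>> node) {
  // Implicit upcast: a concrete node is-a fully type-erased node, so the
  // kernel and predecessor types can be dropped without any casting.
  std::shared_ptr<Kokkos::Impl::GraphNodeImpl<
      Exec, Kokkos::Experimental::TypeErasedTag,
      Kokkos::Experimental::TypeErasedTag>>
      erased = std::move(node);
  (void)erased;
}
```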
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HBWSpace.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HBWSpace.cpp
new file mode 100644 (file)
index 0000000..d533ec0
--- /dev/null
@@ -0,0 +1,384 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <cstddef>
+#include <cstdlib>
+#include <cstdint>
+#include <cstring>
+
+#include <iostream>
+#include <sstream>
+#include <algorithm>
+
+#include <Kokkos_HBWSpace.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_MemorySpace.hpp>
+#include <Kokkos_Atomic.hpp>
+#ifdef KOKKOS_ENABLE_HBWSPACE
+#include <memkind.h>
+#endif
+
+#include <impl/Kokkos_Tools.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+#ifdef KOKKOS_ENABLE_HBWSPACE
+#define MEMKIND_TYPE MEMKIND_HBW  // hbw_get_kind(HBW_PAGESIZE_4KB)
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Experimental {
+
+/* Default allocation mechanism */
+HBWSpace::HBWSpace() : m_alloc_mech(HBWSpace::STD_MALLOC) {
+  printf("Init\n");
+  setenv("MEMKIND_HBW_NODES", "1", 0);
+}
+
+/* Default allocation mechanism */
+HBWSpace::HBWSpace(const HBWSpace::AllocationMechanism &arg_alloc_mech)
+    : m_alloc_mech(HBWSpace::STD_MALLOC) {
+  printf("Init2\n");
+  setenv("MEMKIND_HBW_NODES", "1", 0);
+  if (arg_alloc_mech == STD_MALLOC) {
+    m_alloc_mech = HBWSpace::STD_MALLOC;
+  }
+}
+
+void *HBWSpace::allocate(const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+void *HBWSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
+                         const size_t arg_logical_size) const {
+  return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+void *HBWSpace::impl_allocate(
+    const char *arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  static_assert(sizeof(void *) == sizeof(uintptr_t),
+                "Error sizeof(void*) != sizeof(uintptr_t)");
+
+  static_assert(
+      Kokkos::Impl::power_of_two<Kokkos::Impl::MEMORY_ALIGNMENT>::value,
+      "Memory alignment must be power of two");
+
+  constexpr uintptr_t alignment      = Kokkos::Impl::MEMORY_ALIGNMENT;
+  constexpr uintptr_t alignment_mask = alignment - 1;
+
+  void *ptr = nullptr;
+
+  if (arg_alloc_size) {
+    if (m_alloc_mech == STD_MALLOC) {
+      // Over-allocate and round up to guarantee proper alignment.
+      size_t size_padded = arg_alloc_size + sizeof(void *) + alignment;
+
+      void *alloc_ptr = memkind_malloc(MEMKIND_TYPE, size_padded);
+
+      if (alloc_ptr) {
+        uintptr_t address = reinterpret_cast<uintptr_t>(alloc_ptr);
+
+        // offset enough to record the alloc_ptr
+        address += sizeof(void *);
+        uintptr_t rem    = address % alignment;
+        uintptr_t offset = rem ? (alignment - rem) : 0u;
+        address += offset;
+        ptr = reinterpret_cast<void *>(address);
+        // record the alloc'd pointer
+        address -= sizeof(void *);
+        *reinterpret_cast<void **>(address) = alloc_ptr;
+      }
+    }
+  }
+
+  if ((ptr == nullptr) || (reinterpret_cast<uintptr_t>(ptr) == ~uintptr_t(0)) ||
+      (reinterpret_cast<uintptr_t>(ptr) & alignment_mask)) {
+    std::ostringstream msg;
+    msg << "Kokkos::Experimental::HBWSpace::allocate[ ";
+    switch (m_alloc_mech) {
+      case STD_MALLOC: msg << "STD_MALLOC"; break;
+      case POSIX_MEMALIGN: msg << "POSIX_MEMALIGN"; break;
+      case POSIX_MMAP: msg << "POSIX_MMAP"; break;
+      case INTEL_MM_ALLOC: msg << "INTEL_MM_ALLOC"; break;
+    }
+    msg << " ]( " << arg_alloc_size << " ) FAILED";
+    if (ptr == nullptr) {
+      msg << " nullptr";
+    } else {
+      msg << " NOT ALIGNED " << ptr;
+    }
+
+    std::cerr << msg.str() << std::endl;
+    std::cerr.flush();
+
+    Kokkos::Impl::throw_runtime_exception(msg.str());
+  }
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    const size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+  }
+
+  return ptr;
+}
+
+void HBWSpace::deallocate(void *const arg_alloc_ptr,
+                          const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+void HBWSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
+                          const size_t arg_alloc_size,
+                          const size_t arg_logical_size) const {
+  impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void HBWSpace::impl_deallocate(
+    const char *arg_label, void *const arg_alloc_ptr,
+    const size_t arg_alloc_size, const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  if (arg_alloc_ptr) {
+    if (Kokkos::Profiling::profileLibraryLoaded()) {
+      const size_t reported_size =
+          (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+      Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+                                        reported_size);
+    }
+
+    if (m_alloc_mech == STD_MALLOC) {
+      void *alloc_ptr = *(reinterpret_cast<void **>(arg_alloc_ptr) - 1);
+      memkind_free(MEMKIND_TYPE, alloc_ptr);
+    }
+  }
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+#ifdef KOKKOS_ENABLE_DEBUG
+SharedAllocationRecord<void, void>
+    SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::s_root_record;
+#endif
+
+void SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::deallocate(
+    SharedAllocationRecord<void, void> *arg_rec) {
+  delete static_cast<SharedAllocationRecord *>(arg_rec);
+}
+
+SharedAllocationRecord<Kokkos::Experimental::HBWSpace,
+                       void>::~SharedAllocationRecord()
+#if defined( \
+    KOKKOS_IMPL_INTEL_WORKAROUND_NOEXCEPT_SPECIFICATION_VIRTUAL_FUNCTION)
+    noexcept
+#endif
+{
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     SharedAllocationRecord<void, void>::m_alloc_size,
+                     (SharedAllocationRecord<void, void>::m_alloc_size -
+                      sizeof(SharedAllocationHeader)));
+}
+
+SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::
+    SharedAllocationRecord(
+        const Kokkos::Experimental::HBWSpace &arg_space,
+        const std::string &arg_label, const size_t arg_alloc_size,
+        const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : SharedAllocationRecord<void, void>(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::Experimental::HBWSpace,
+                                  void>::s_root_record,
+#endif
+          Impl::checked_allocation_with_header(arg_space, arg_label,
+                                               arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+  // Fill in the Header information
+  RecordBase::m_alloc_ptr->m_record =
+      static_cast<SharedAllocationRecord<void, void> *>(this);
+
+  strncpy(RecordBase::m_alloc_ptr->m_label, arg_label.c_str(),
+          SharedAllocationHeader::maximum_label_length - 1);
+  // Set last element zero, in case c_str is too long
+  RecordBase::m_alloc_ptr
+      ->m_label[SharedAllocationHeader::maximum_label_length - 1] = '\0';
+}
+
+//----------------------------------------------------------------------------
+
+void *
+SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::allocate_tracked(
+    const Kokkos::Experimental::HBWSpace &arg_space,
+    const std::string &arg_alloc_label, const size_t arg_alloc_size) {
+  if (!arg_alloc_size) return nullptr;
+
+  SharedAllocationRecord *const r =
+      allocate(arg_space, arg_alloc_label, arg_alloc_size);
+
+  RecordBase::increment(r);
+
+  return r->data();
+}
+
+void SharedAllocationRecord<Kokkos::Experimental::HBWSpace,
+                            void>::deallocate_tracked(void *const
+                                                          arg_alloc_ptr) {
+  if (arg_alloc_ptr != nullptr) {
+    SharedAllocationRecord *const r = get_record(arg_alloc_ptr);
+
+    RecordBase::decrement(r);
+  }
+}
+
+void *SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::
+    reallocate_tracked(void *const arg_alloc_ptr, const size_t arg_alloc_size) {
+  SharedAllocationRecord *const r_old = get_record(arg_alloc_ptr);
+  SharedAllocationRecord *const r_new =
+      allocate(r_old->m_space, r_old->get_label(), arg_alloc_size);
+
+  Kokkos::Impl::DeepCopy<Kokkos::Experimental::HBWSpace,
+                         Kokkos::Experimental::HBWSpace>(
+      r_new->data(), r_old->data(), std::min(r_old->size(), r_new->size()));
+  Kokkos::fence(
+      "SharedAllocationRecord<Kokkos::Experimental::HBWSpace, "
+      "void>::reallocate_tracked(): fence after copying data");
+
+  RecordBase::increment(r_new);
+  RecordBase::decrement(r_old);
+
+  return r_new->data();
+}
+
+SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>
+    *SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::get_record(
+        void *alloc_ptr) {
+  using Header = SharedAllocationHeader;
+  using RecordHost =
+      SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>;
+
+  SharedAllocationHeader const *const head =
+      alloc_ptr ? Header::get_header(alloc_ptr) : nullptr;
+  RecordHost *const record =
+      head ? static_cast<RecordHost *>(head->m_record) : nullptr;
+
+  if (!alloc_ptr || record->m_alloc_ptr != head) {
+    Kokkos::Impl::throw_runtime_exception(std::string(
+        "Kokkos::Impl::SharedAllocationRecord< Kokkos::Experimental::HBWSpace "
+        ", void >::get_record ERROR"));
+  }
+
+  return record;
+}
+
+// Iterate records to print orphaned memory ...
+void SharedAllocationRecord<Kokkos::Experimental::HBWSpace, void>::
+    print_records(std::ostream &s, const Kokkos::Experimental::HBWSpace &space,
+                  bool detail) {
+#ifdef KOKKOS_ENABLE_DEBUG
+  SharedAllocationRecord<void, void>::print_host_accessible_records(
+      s, "HBWSpace", &s_root_record, detail);
+#else
+  throw_runtime_exception(
+      "SharedAllocationRecord<HBWSpace>::print_records"
+      " only works with KOKKOS_ENABLE_DEBUG enabled");
+#endif
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Experimental {
+namespace {
+const unsigned HBW_SPACE_ATOMIC_MASK     = 0xFFFF;
+const unsigned HBW_SPACE_ATOMIC_XOR_MASK = 0x5A39;
+static int HBW_SPACE_ATOMIC_LOCKS[HBW_SPACE_ATOMIC_MASK + 1];
+}  // namespace
+
+namespace Impl {
+void init_lock_array_hbw_space() {
+  static int is_initialized = 0;
+  if (!is_initialized) {
+    for (int i = 0; i < static_cast<int>(HBW_SPACE_ATOMIC_MASK + 1); i++)
+      HBW_SPACE_ATOMIC_LOCKS[i] = 0;
+    is_initialized = 1;
+  }
+}
+
+bool lock_address_hbw_space(void *ptr) {
+  return 0 == atomic_compare_exchange(
+                  &HBW_SPACE_ATOMIC_LOCKS[((size_t(ptr) >> 2) &
+                                           HBW_SPACE_ATOMIC_MASK) ^
+                                          HBW_SPACE_ATOMIC_XOR_MASK],
+                  0, 1);
+}
+
+void unlock_address_hbw_space(void *ptr) {
+  atomic_exchange(
+      &HBW_SPACE_ATOMIC_LOCKS[((size_t(ptr) >> 2) & HBW_SPACE_ATOMIC_MASK) ^
+                              HBW_SPACE_ATOMIC_XOR_MASK],
+      0);
+}
+
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+#endif
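impl_allocate above (and its HostSpace counterpart later in this commit) implements aligned allocation by hand: over-allocate, round the address up to the next alignment boundary, and stash the raw pointer in the word just below the returned address so deallocation can recover it. A condensed sketch of the same scheme, using plain malloc where the file above uses memkind_malloc; the function names are illustrative:

#include <cstdint>
#include <cstdlib>

// Allocate 'size' bytes aligned to 'alignment' (a power of two). The raw
// pointer is recorded immediately before the returned address.
void* aligned_alloc_with_header(std::size_t size, std::uintptr_t alignment) {
  void* raw = std::malloc(size + sizeof(void*) + alignment);
  if (raw == nullptr) return nullptr;
  std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(raw) + sizeof(void*);
  addr += (alignment - addr % alignment) % alignment;  // round up
  reinterpret_cast<void**>(addr)[-1] = raw;  // record the alloc'd pointer
  return reinterpret_cast<void*>(addr);
}

void aligned_free_with_header(void* ptr) {
  if (ptr) std::free(reinterpret_cast<void**>(ptr)[-1]);  // recover and free
}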
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostBarrier.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostBarrier.cpp
new file mode 100644 (file)
index 0000000..9ad2dae
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <impl/Kokkos_HostBarrier.hpp>
+#include <impl/Kokkos_BitOps.hpp>
+
+#include <thread>
+#if defined(_WIN32)
+#include <process.h>
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+void HostBarrier::impl_backoff_wait_until_equal(
+    int* ptr, const int v, const bool active_wait) noexcept {
+  unsigned count = 0u;
+
+  while (!test_equal(ptr, v)) {
+    const int c = int_log2(++count);
+    if (!active_wait || c > log2_iterations_till_sleep) {
+      std::this_thread::sleep_for(
+          std::chrono::nanoseconds(c < 16 ? 256 * c : 4096));
+    } else if (c > log2_iterations_till_yield) {
+      std::this_thread::yield();
+    }
+#if defined(KOKKOS_ENABLE_ASM)
+#if defined(__PPC64__)
+    for (int j = 0; j < num_nops; ++j) {
+      asm volatile("nop\n");
+    }
+    asm volatile("or 27, 27, 27" ::: "memory");
+#elif defined(__amd64) || defined(__amd64__) || defined(__x86_64) || \
+    defined(__x86_64__)
+    for (int j = 0; j < num_nops; ++j) {
+      asm volatile("nop\n");
+    }
+    asm volatile("pause\n" ::: "memory");
+#endif
+#endif
+  }
+}
+}  // namespace Impl
+}  // namespace Kokkos
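The backoff loop above escalates from cheap polling to yields to short, capped sleeps, so a thread that waits a long time stops burning CPU. A standalone sketch of the same spin-then-yield-then-sleep pattern; it uses std::atomic and the GCC/Clang builtin __builtin_clz for the log2, where the file above uses Kokkos' own test_equal and int_log2 helpers:

#include <atomic>
#include <chrono>
#include <thread>

// Wait until 'flag' equals 'expected', escalating as the failed-poll count
// grows: pure spinning at first, yields once floor(log2(count)) exceeds 4,
// and short capped sleeps once it exceeds 6 (the HostBarrier thresholds).
void backoff_wait(const std::atomic<int>& flag, int expected) {
  unsigned count = 0;
  while (flag.load(std::memory_order_acquire) != expected) {
    const int c = 31 - __builtin_clz(++count);  // floor(log2(count))
    if (c > 6) {
      std::this_thread::sleep_for(
          std::chrono::nanoseconds(c < 16 ? 256 * c : 4096));
    } else if (c > 4) {
      std::this_thread::yield();
    }  // else: keep spinning
  }
}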
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostBarrier.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostBarrier.hpp
new file mode 100644 (file)
index 0000000..4914294
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_HOST_BARRIER_HPP
+#define KOKKOS_HOST_BARRIER_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Atomic.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// class HostBarrier
+//
+// provides a static and member interface for a barrier shared between threads
+// of execution.
+//
+// *buffer* is a shared resource between the threads of execution
+// *step* should be a stack variable associated with the current thread of
+// execution
+// *size* is the number of threads which share the barrier
+//
+// before calling any arrive type function the buffer and step must have been
+// initialized to 0 and one of the following conditions must be true
+//
+// 1) step == 0 (i.e. first arrive call to HostBarrier),
+// 2) try_wait has returned true for the current thread of execution,
+// 3) a wait type function has returned for the current thread of execution, or
+// 4) split_arrive returned true on the current thread of execution and it has
+//    called split_release
+//
+// The purpose of the split functions is to give the last thread to arrive
+// an opportunity to perform some actions before releasing the waiting threads
+//
+// If all threads have arrived (and split_release has been called if using
+// split_arrive) before a wait type call, the wait may return quickly
+class HostBarrier {
+ public:
+  using buffer_type                         = int;
+  static constexpr int required_buffer_size = 128;
+  static constexpr int required_buffer_length =
+      required_buffer_size / sizeof(int);
+
+ private:
+  // fit the following 3 atomics within 128 bytes while
+  // keeping the arrive atomic at least 64 bytes away from
+  // the wait atomic to reduce contention on the caches
+  static constexpr int arrive_idx = 32 / sizeof(int);
+  static constexpr int master_idx = 64 / sizeof(int);
+  static constexpr int wait_idx   = 96 / sizeof(int);
+
+  static constexpr int num_nops                   = 32;
+  static constexpr int iterations_till_backoff    = 64;
+  static constexpr int log2_iterations_till_yield = 4;
+  static constexpr int log2_iterations_till_sleep = 6;
+
+ public:
+  // will return true if the caller is the last thread to arrive
+  KOKKOS_INLINE_FUNCTION
+  static bool split_arrive(int* buffer, const int size, int& step,
+                           const bool master_wait = true) noexcept {
+    if (size <= 1) return true;
+
+    ++step;
+    Kokkos::memory_fence();
+    const bool result =
+        Kokkos::atomic_fetch_add(buffer + arrive_idx, 1) == size - 1;
+
+    if (master_wait && result) {
+      Kokkos::atomic_fetch_add(buffer + master_idx, 1);
+    }
+
+    return result;
+  }
+
+  // release waiting threads
+  // only the thread which received a return value of true from split_arrive
+  // or the thread which calls split_master_wait may call split_release
+  KOKKOS_INLINE_FUNCTION
+  static void split_release(int* buffer, const int size, const int /*step*/
+                            ) noexcept {
+    if (size <= 1) return;
+    Kokkos::memory_fence();
+    Kokkos::atomic_fetch_sub(buffer + arrive_idx, size);
+    Kokkos::atomic_fetch_add(buffer + wait_idx, 1);
+  }
+
+  // should only be called by the master thread; allows the master thread to
+  // resume after all threads have arrived
+  KOKKOS_INLINE_FUNCTION
+  static void split_master_wait(int* buffer, const int size, const int step,
+                                const bool active_wait = true) noexcept {
+    if (size <= 1) return;
+    wait_until_equal(buffer + master_idx, step, active_wait);
+  }
+
+  // arrive, last thread automatically release waiting threads
+  KOKKOS_INLINE_FUNCTION
+  static void arrive(int* buffer, const int size, int& step) noexcept {
+    if (size <= 1) return;
+    if (split_arrive(buffer, size, step)) {
+      split_release(buffer, size, step);
+    }
+  }
+
+  // test if all threads have arrived
+  KOKKOS_INLINE_FUNCTION
+  static bool try_wait(int* buffer, const int size, const int step) noexcept {
+    if (size <= 1) return true;
+    return test_equal(buffer + wait_idx, step);
+  }
+
+  // wait for all threads to arrive
+  KOKKOS_INLINE_FUNCTION
+  static void wait(int* buffer, const int size, const int step,
+                   bool active_wait = true) noexcept {
+    if (size <= 1) return;
+    wait_until_equal(buffer + wait_idx, step, active_wait);
+  }
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  bool split_arrive(const bool master_wait = true) const noexcept {
+    return split_arrive(m_buffer, m_size, m_step, master_wait);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void split_release() const noexcept {
+    split_release(m_buffer, m_size, m_step);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void split_master_wait(const bool active_wait = true) noexcept {
+    split_master_wait(m_buffer, m_size, m_step, active_wait);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void arrive() const noexcept { return arrive(m_buffer, m_size, m_step); }
+
+  KOKKOS_INLINE_FUNCTION
+  bool try_wait() const noexcept { return try_wait(m_buffer, m_size, m_step); }
+
+  KOKKOS_INLINE_FUNCTION
+  void wait() const noexcept { wait(m_buffer, m_size, m_step); }
+
+  HostBarrier()              = default;
+  HostBarrier(HostBarrier&&) = default;
+  HostBarrier& operator=(HostBarrier&&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  HostBarrier(int size, int* buffer)
+      : m_size{size}, m_step{0u}, m_buffer{buffer} {}
+
+  HostBarrier(const HostBarrier&) = delete;
+  HostBarrier& operator=(const HostBarrier&) = delete;
+
+ private:
+  KOKKOS_INLINE_FUNCTION
+  static bool test_equal(int* ptr, int v) noexcept {
+    const bool result = Kokkos::atomic_fetch_add(ptr, 0) == v;
+    if (result) {
+      Kokkos::memory_fence();
+    }
+    return result;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static void wait_until_equal(int* ptr, const int v,
+                               bool active_wait = true) noexcept {
+    KOKKOS_IF_ON_HOST((impl_wait_until_equal_host(ptr, v, active_wait);))
+
+    KOKKOS_IF_ON_DEVICE(((void)active_wait; while (!test_equal(ptr, v)){}))
+  }
+
+  static void impl_wait_until_equal_host(int* ptr, const int v,
+                                         bool active_wait = true) noexcept {
+    bool result = test_equal(ptr, v);
+    for (int i = 0; !result && i < iterations_till_backoff; ++i) {
+#if defined(KOKKOS_ENABLE_ASM)
+#if defined(_WIN32)
+      for (int j = 0; j < num_nops; ++j) {
+        __asm__ __volatile__("nop\n");
+      }
+      __asm__ __volatile__("pause\n" ::: "memory");
+#elif defined(__PPC64__)
+      for (int j = 0; j < num_nops; ++j) {
+        asm volatile("nop\n");
+      }
+      asm volatile("or 27, 27, 27" ::: "memory");
+#elif defined(__amd64) || defined(__amd64__) || defined(__x86_64) || \
+    defined(__x86_64__)
+      for (int j = 0; j < num_nops; ++j) {
+        asm volatile("nop\n");
+      }
+      asm volatile("pause\n" ::: "memory");
+#endif
+#endif
+      result = test_equal(ptr, v);
+    }
+    if (!result) {
+      impl_backoff_wait_until_equal(ptr, v, active_wait);
+    }
+  }
+
+  static void impl_backoff_wait_until_equal(int* ptr, const int v,
+                                            const bool active_wait) noexcept;
+
+ private:
+  int m_size{0};
+  mutable int m_step{0};
+  int* m_buffer{nullptr};
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_HOST_BARRIER_HPP
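The arrival/release protocol documented at the top of the class looks like this in use. A sketch only: it assumes 'buffer' points to HostBarrier::required_buffer_length zero-initialized ints shared by all 'size' threads, that each thread keeps its own 'step' starting at 0, and the worker function itself is illustrative:

#include <impl/Kokkos_HostBarrier.hpp>

void worker(int* buffer, int size, int iterations) {
  int step = 0;  // per-thread state, must start at 0
  for (int it = 0; it < iterations; ++it) {
    // split_arrive returns true only for the last thread to arrive
    if (Kokkos::Impl::HostBarrier::split_arrive(buffer, size, step)) {
      // ... the last arrival may do some serial work here ...
      Kokkos::Impl::HostBarrier::split_release(buffer, size, step);
    } else {
      // everyone else blocks until the releaser bumps the wait word
      Kokkos::Impl::HostBarrier::wait(buffer, size, step);
    }
  }
}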
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostSharedPtr.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostSharedPtr.hpp
new file mode 100644 (file)
index 0000000..a2a792a
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_HOST_SHARED_PTR_HPP
+#define KOKKOS_IMPL_HOST_SHARED_PTR_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Atomic.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+#include <functional>
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename T>
+class HostSharedPtr {
+ public:
+  using element_type = T;
+
+  KOKKOS_DEFAULTED_FUNCTION constexpr HostSharedPtr() = default;
+  KOKKOS_FUNCTION constexpr HostSharedPtr(std::nullptr_t) {}
+
+  explicit HostSharedPtr(T* element_ptr)
+      : HostSharedPtr(element_ptr, [](T* const t) { delete t; }) {}
+
+  template <class Deleter>
+  HostSharedPtr(T* element_ptr, const Deleter& deleter)
+      : m_element_ptr(element_ptr) {
+#ifdef KOKKOS_ENABLE_CXX17
+    static_assert(std::is_invocable_v<Deleter, T*> &&
+                  std::is_copy_constructible_v<Deleter>);
+#endif
+    if (element_ptr) {
+      try {
+        m_control = new Control{deleter, 1};
+      } catch (...) {
+        deleter(element_ptr);
+        throw;
+      }
+    }
+  }
+
+  KOKKOS_FUNCTION HostSharedPtr(HostSharedPtr&& other) noexcept
+      : m_element_ptr(other.m_element_ptr), m_control(other.m_control) {
+    other.m_element_ptr = nullptr;
+    other.m_control     = nullptr;
+  }
+
+  KOKKOS_FUNCTION HostSharedPtr(const HostSharedPtr& other) noexcept
+      : m_element_ptr(other.m_element_ptr), m_control(other.m_control) {
+    KOKKOS_IF_ON_HOST(
+        (if (m_control) Kokkos::atomic_add(&(m_control->m_counter), 1);))
+    KOKKOS_IF_ON_DEVICE(m_control = nullptr;)
+  }
+
+  KOKKOS_FUNCTION HostSharedPtr& operator=(HostSharedPtr&& other) noexcept {
+    if (&other != this) {
+      cleanup();
+      m_element_ptr       = other.m_element_ptr;
+      other.m_element_ptr = nullptr;
+      m_control           = other.m_control;
+      other.m_control     = nullptr;
+    }
+    return *this;
+  }
+
+  KOKKOS_FUNCTION HostSharedPtr& operator=(
+      const HostSharedPtr& other) noexcept {
+    if (&other != this) {
+      cleanup();
+      m_element_ptr = other.m_element_ptr;
+      m_control     = other.m_control;
+      KOKKOS_IF_ON_HOST(
+          (if (m_control) Kokkos::atomic_add(&(m_control->m_counter), 1);))
+      KOKKOS_IF_ON_DEVICE(m_control = nullptr;)
+    }
+    return *this;
+  }
+
+  KOKKOS_FUNCTION ~HostSharedPtr() { cleanup(); }
+
+  // returns the stored pointer
+  KOKKOS_FUNCTION T* get() const noexcept { return m_element_ptr; }
+  // dereferences the stored pointer
+  KOKKOS_FUNCTION T& operator*() const noexcept {
+    KOKKOS_EXPECTS(bool(*this));
+    return *get();
+  }
+  // dereferences the stored pointer
+  KOKKOS_FUNCTION T* operator->() const noexcept {
+    KOKKOS_EXPECTS(bool(*this));
+    return get();
+  }
+
+  // checks if the stored pointer is not null
+  KOKKOS_FUNCTION explicit operator bool() const noexcept {
+    return get() != nullptr;
+  }
+
+  // returns the number of HostSharedPtr instances managing the current object
+  // or 0 if there is no managed object.
+  int use_count() const noexcept {
+    return m_control ? m_control->m_counter : 0;
+  }
+
+ private:
+  KOKKOS_FUNCTION void cleanup() noexcept {
+    KOKKOS_IF_ON_HOST((
+        // If m_control is set, then this instance shares responsibility for
+        // managing the objects pointed to by m_control and m_element_ptr.
+        if (m_control) {
+          int const count =
+              Kokkos::atomic_fetch_sub(&(m_control->m_counter), 1);
+          // atomic_fetch_sub might have memory order relaxed, so we need to
+          // force synchronization to avoid multiple threads doing the cleanup.
+          Kokkos::memory_fence();
+          if (count == 1) {
+            (m_control->m_deleter)(m_element_ptr);
+            m_element_ptr = nullptr;
+            delete m_control;
+            m_control = nullptr;
+          }
+        }))
+  }
+
+  struct Control {
+    std::function<void(T*)> m_deleter;
+    int m_counter;
+  };
+
+  T* m_element_ptr   = nullptr;
+  Control* m_control = nullptr;
+};
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
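HostSharedPtr behaves like a stripped-down std::shared_ptr on the host (copies made on device deliberately do not touch the reference count). A short sketch of the host-side behavior; the demo function and values are illustrative:

#include <impl/Kokkos_HostSharedPtr.hpp>
#include <cstdio>

void demo() {
  // the deleter still runs if allocating the control block throws
  Kokkos::Impl::HostSharedPtr<int> p(new int(42),
                                     [](int* ptr) { delete ptr; });
  {
    Kokkos::Impl::HostSharedPtr<int> q = p;     // atomically bumps the count
    std::printf("%d %d\n", *q, q.use_count());  // prints "42 2"
  }  // q's destructor decrements the count but deletes nothing
  std::printf("%d\n", p.use_count());           // prints "1"
}  // p's destructor sees the count drop to zero and runs the deleter once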
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostSpace.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostSpace.cpp
new file mode 100644 (file)
index 0000000..4bf9048
--- /dev/null
@@ -0,0 +1,373 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_MemorySpace.hpp>
+#include <impl/Kokkos_Tools.hpp>
+
+/*--------------------------------------------------------------------------*/
+
+#if defined(KOKKOS_COMPILER_INTEL) && !defined(KOKKOS_ENABLE_CUDA)
+
+// Intel specialized allocator does not interoperate with CUDA memory allocation
+
+#define KOKKOS_ENABLE_INTEL_MM_ALLOC
+
+#endif
+
+/*--------------------------------------------------------------------------*/
+
+#include <cstddef>
+#include <cstdlib>
+#include <cstdint>
+#include <cstring>
+
+#include <iostream>
+#include <sstream>
+
+#include <Kokkos_HostSpace.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <Kokkos_Atomic.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+/* Default allocation mechanism */
+HostSpace::HostSpace()
+    : m_alloc_mech(
+#if defined(KOKKOS_ENABLE_INTEL_MM_ALLOC)
+          HostSpace::INTEL_MM_ALLOC
+#else
+          HostSpace::STD_MALLOC
+#endif
+      ) {
+}
+
+/* Default allocation mechanism */
+HostSpace::HostSpace(const HostSpace::AllocationMechanism &arg_alloc_mech)
+    : m_alloc_mech(HostSpace::STD_MALLOC) {
+  if (arg_alloc_mech == STD_MALLOC) {
+    m_alloc_mech = HostSpace::STD_MALLOC;
+  }
+#if defined(KOKKOS_ENABLE_INTEL_MM_ALLOC)
+  else if (arg_alloc_mech == HostSpace::INTEL_MM_ALLOC) {
+    m_alloc_mech = HostSpace::INTEL_MM_ALLOC;
+  }
+#endif
+  else {
+    const char *const mech =
+        (arg_alloc_mech == HostSpace::INTEL_MM_ALLOC)
+            ? "INTEL_MM_ALLOC"
+            : ((arg_alloc_mech == HostSpace::POSIX_MMAP) ? "POSIX_MMAP" : "");
+
+    std::string msg;
+    msg.append("Kokkos::HostSpace ");
+    msg.append(mech);
+    msg.append(" is not available");
+    Kokkos::Impl::throw_runtime_exception(msg);
+  }
+}
+
+void *HostSpace::allocate(const size_t arg_alloc_size) const {
+  return allocate("[unlabeled]", arg_alloc_size);
+}
+void *HostSpace::allocate(const char *arg_label, const size_t arg_alloc_size,
+                          const size_t arg_logical_size) const {
+  return impl_allocate(arg_label, arg_alloc_size, arg_logical_size);
+}
+void *HostSpace::impl_allocate(
+    const char *arg_label, const size_t arg_alloc_size,
+    const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  const size_t reported_size =
+      (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+  static_assert(sizeof(void *) == sizeof(uintptr_t),
+                "Error sizeof(void*) != sizeof(uintptr_t)");
+
+  static_assert(
+      Kokkos::Impl::is_integral_power_of_two(Kokkos::Impl::MEMORY_ALIGNMENT),
+      "Memory alignment must be power of two");
+
+  constexpr uintptr_t alignment      = Kokkos::Impl::MEMORY_ALIGNMENT;
+  constexpr uintptr_t alignment_mask = alignment - 1;
+
+  void *ptr = nullptr;
+
+  if (arg_alloc_size) {
+    if (m_alloc_mech == STD_MALLOC) {
+      // Over-allocate and round up to guarantee proper alignment.
+      size_t size_padded = arg_alloc_size + sizeof(void *) + alignment;
+
+      void *alloc_ptr = malloc(size_padded);
+
+      if (alloc_ptr) {
+        auto address = reinterpret_cast<uintptr_t>(alloc_ptr);
+
+        // offset enough to record the alloc_ptr
+        address += sizeof(void *);
+        uintptr_t rem    = address % alignment;
+        uintptr_t offset = rem ? (alignment - rem) : 0u;
+        address += offset;
+        ptr = reinterpret_cast<void *>(address);
+        // record the alloc'd pointer
+        address -= sizeof(void *);
+        *reinterpret_cast<void **>(address) = alloc_ptr;
+      }
+    }
+#if defined(KOKKOS_ENABLE_INTEL_MM_ALLOC)
+    else if (m_alloc_mech == INTEL_MM_ALLOC) {
+      ptr = _mm_malloc(arg_alloc_size, alignment);
+    }
+#endif
+  }
+
+  if ((ptr == nullptr) || (reinterpret_cast<uintptr_t>(ptr) == ~uintptr_t(0)) ||
+      (reinterpret_cast<uintptr_t>(ptr) & alignment_mask)) {
+    Experimental::RawMemoryAllocationFailure::FailureMode failure_mode =
+        Experimental::RawMemoryAllocationFailure::FailureMode::
+            AllocationNotAligned;
+    if (ptr == nullptr) {
+      failure_mode = Experimental::RawMemoryAllocationFailure::FailureMode::
+          OutOfMemoryError;
+    }
+
+    Experimental::RawMemoryAllocationFailure::AllocationMechanism alloc_mec =
+        Experimental::RawMemoryAllocationFailure::AllocationMechanism::
+            StdMalloc;
+    switch (m_alloc_mech) {
+      case STD_MALLOC: break;  // default
+      case POSIX_MEMALIGN:
+        alloc_mec = Experimental::RawMemoryAllocationFailure::
+            AllocationMechanism::PosixMemAlign;
+        break;
+      case POSIX_MMAP:
+        alloc_mec = Experimental::RawMemoryAllocationFailure::
+            AllocationMechanism::PosixMMap;
+        break;
+      case INTEL_MM_ALLOC:
+        alloc_mec = Experimental::RawMemoryAllocationFailure::
+            AllocationMechanism::IntelMMAlloc;
+        break;
+    }
+
+    throw Kokkos::Experimental::RawMemoryAllocationFailure(
+        arg_alloc_size, alignment, failure_mode, alloc_mec);
+  }
+  if (Kokkos::Profiling::profileLibraryLoaded()) {
+    Kokkos::Profiling::allocateData(arg_handle, arg_label, ptr, reported_size);
+  }
+  return ptr;
+}
+
+void HostSpace::deallocate(void *const arg_alloc_ptr,
+                           const size_t arg_alloc_size) const {
+  deallocate("[unlabeled]", arg_alloc_ptr, arg_alloc_size);
+}
+
+void HostSpace::deallocate(const char *arg_label, void *const arg_alloc_ptr,
+                           const size_t arg_alloc_size,
+                           const size_t arg_logical_size) const {
+  impl_deallocate(arg_label, arg_alloc_ptr, arg_alloc_size, arg_logical_size);
+}
+void HostSpace::impl_deallocate(
+    const char *arg_label, void *const arg_alloc_ptr,
+    const size_t arg_alloc_size, const size_t arg_logical_size,
+    const Kokkos::Tools::SpaceHandle arg_handle) const {
+  if (arg_alloc_ptr) {
+    Kokkos::fence("HostSpace::impl_deallocate before free");
+    size_t reported_size =
+        (arg_logical_size > 0) ? arg_logical_size : arg_alloc_size;
+    if (Kokkos::Profiling::profileLibraryLoaded()) {
+      Kokkos::Profiling::deallocateData(arg_handle, arg_label, arg_alloc_ptr,
+                                        reported_size);
+    }
+    if (m_alloc_mech == STD_MALLOC) {
+      void *alloc_ptr = *(reinterpret_cast<void **>(arg_alloc_ptr) - 1);
+      free(alloc_ptr);
+    }
+#if defined(KOKKOS_ENABLE_INTEL_MM_ALLOC)
+    else if (m_alloc_mech == INTEL_MM_ALLOC) {
+      _mm_free(arg_alloc_ptr);
+    }
+#endif
+  }
+}
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+#ifdef KOKKOS_ENABLE_DEBUG
+SharedAllocationRecord<void, void>
+    SharedAllocationRecord<Kokkos::HostSpace, void>::s_root_record;
+#endif
+
+SharedAllocationRecord<Kokkos::HostSpace, void>::~SharedAllocationRecord()
+#if defined( \
+    KOKKOS_IMPL_INTEL_WORKAROUND_NOEXCEPT_SPECIFICATION_VIRTUAL_FUNCTION)
+    noexcept
+#endif
+{
+  m_space.deallocate(m_label.c_str(),
+                     SharedAllocationRecord<void, void>::m_alloc_ptr,
+                     SharedAllocationRecord<void, void>::m_alloc_size,
+                     (SharedAllocationRecord<void, void>::m_alloc_size -
+                      sizeof(SharedAllocationHeader)));
+}
+
+SharedAllocationHeader *_do_allocation(Kokkos::HostSpace const &space,
+                                       std::string const &label,
+                                       size_t alloc_size) {
+  try {
+    return reinterpret_cast<SharedAllocationHeader *>(
+        space.allocate(alloc_size));
+  } catch (Experimental::RawMemoryAllocationFailure const &failure) {
+    if (failure.failure_mode() == Experimental::RawMemoryAllocationFailure::
+                                      FailureMode::AllocationNotAligned) {
+      // TODO: delete the misaligned memory
+    }
+
+    std::cerr << "Kokkos failed to allocate memory for label \"" << label
+              << "\".  Allocation using MemorySpace named \"" << space.name()
+              << " failed with the following error:  ";
+    failure.print_error_message(std::cerr);
+    std::cerr.flush();
+    Kokkos::Impl::throw_runtime_exception("Memory allocation failure");
+  }
+  return nullptr;  // unreachable
+}
+
+SharedAllocationRecord<Kokkos::HostSpace, void>::SharedAllocationRecord(
+    const Kokkos::HostSpace &arg_space, const std::string &arg_label,
+    const size_t arg_alloc_size,
+    const SharedAllocationRecord<void, void>::function_type arg_dealloc)
+    // Pass through allocated [ SharedAllocationHeader , user_memory ]
+    // Pass through deallocation function
+    : base_t(
+#ifdef KOKKOS_ENABLE_DEBUG
+          &SharedAllocationRecord<Kokkos::HostSpace, void>::s_root_record,
+#endif
+          Impl::checked_allocation_with_header(arg_space, arg_label,
+                                               arg_alloc_size),
+          sizeof(SharedAllocationHeader) + arg_alloc_size, arg_dealloc,
+          arg_label),
+      m_space(arg_space) {
+  this->base_t::_fill_host_accessible_header_info(*RecordBase::m_alloc_ptr,
+                                                  arg_label);
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace {
+const unsigned HOST_SPACE_ATOMIC_MASK     = 0xFFFF;
+const unsigned HOST_SPACE_ATOMIC_XOR_MASK = 0x5A39;
+static int HOST_SPACE_ATOMIC_LOCKS[HOST_SPACE_ATOMIC_MASK + 1];
+}  // namespace
+
+namespace Impl {
+void init_lock_array_host_space() {
+  static int is_initialized = 0;
+  if (!is_initialized) {
+    for (int i = 0; i < static_cast<int>(HOST_SPACE_ATOMIC_MASK + 1); i++)
+      HOST_SPACE_ATOMIC_LOCKS[i] = 0;
+    is_initialized = 1;
+  }
+}
+
+bool lock_address_host_space(void *ptr) {
+  return 0 == atomic_compare_exchange(
+                  &HOST_SPACE_ATOMIC_LOCKS[((size_t(ptr) >> 2) &
+                                            HOST_SPACE_ATOMIC_MASK) ^
+                                           HOST_SPACE_ATOMIC_XOR_MASK],
+                  0, 1);
+}
+
+void unlock_address_host_space(void *ptr) {
+  atomic_exchange(
+      &HOST_SPACE_ATOMIC_LOCKS[((size_t(ptr) >> 2) & HOST_SPACE_ATOMIC_MASK) ^
+                               HOST_SPACE_ATOMIC_XOR_MASK],
+      0);
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//==============================================================================
+// <editor-fold desc="Explicit instantiations of CRTP Base classes"> {{{1
+
+#include <impl/Kokkos_SharedAlloc_timpl.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+// To avoid additional compilation cost for something that's (mostly?) not
+// performance sensitive, we explicitly instantiate these CRTP base classes here,
+// where we have access to the associated *_timpl.hpp header files.
+template class SharedAllocationRecordCommon<Kokkos::HostSpace>;
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+// </editor-fold> end Explicit instantiations of CRTP Base classes }}}1
+//==============================================================================
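The anonymous-namespace lock table above backs lock-based fallbacks for atomics on addresses: an address is mapped to one of 0x10000 lock words by discarding the two lowest bits, masking down to 16 bits, and XOR-ing with a fixed pattern. A sketch of just that index computation; the helper name is illustrative:

#include <cstddef>

// Map an address to a slot in the 65536-entry lock table. The >> 2 drops
// the low bits, which are constant for 4-byte-aligned data; the XOR with a
// constant merely permutes the slot assignment. Two addresses share a lock
// exactly when these 16 bits collide.
inline unsigned host_lock_index(void* ptr) {
  const unsigned mask     = 0xFFFF;
  const unsigned xor_mask = 0x5A39;
  return ((reinterpret_cast<std::size_t>(ptr) >> 2) & mask) ^ xor_mask;
}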
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostSpace_deepcopy.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostSpace_deepcopy.cpp
new file mode 100644 (file)
index 0000000..e6ef732
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include "Kokkos_Core.hpp"
+#include "Kokkos_HostSpace_deepcopy.hpp"
+
+namespace Kokkos {
+
+namespace Impl {
+
+void hostspace_fence(const DefaultHostExecutionSpace& exec) {
+  exec.fence("HostSpace fence");
+}
+
+void hostspace_parallel_deepcopy(void* dst, const void* src, ptrdiff_t n) {
+  Kokkos::DefaultHostExecutionSpace exec;
+  hostspace_parallel_deepcopy_async(exec, dst, src, n);
+}
+
+// DeepCopy called with an execution space that can't access HostSpace
+void hostspace_parallel_deepcopy_async(void* dst, const void* src,
+                                       ptrdiff_t n) {
+  Kokkos::DefaultHostExecutionSpace exec;
+  hostspace_parallel_deepcopy_async(exec, dst, src, n);
+  exec.fence(
+      "Kokkos::Impl::hostspace_parallel_deepcopy_async: fence after copy");
+}
+
+void hostspace_parallel_deepcopy_async(const DefaultHostExecutionSpace& exec,
+                                       void* dst, const void* src,
+                                       ptrdiff_t n) {
+  using policy_t = Kokkos::RangePolicy<Kokkos::DefaultHostExecutionSpace>;
+
+  // If the asynchronous HPX backend is enabled, do *not* copy anything
+  // synchronously. The deep copy must be correctly sequenced with respect to
+  // other kernels submitted to the same instance, so we only use the fallback
+  // parallel_for version in this case.
+#if !(defined(KOKKOS_ENABLE_HPX) && defined(KOKKOS_ENABLE_HPX_ASYNC_DISPATCH))
+  constexpr int host_deep_copy_serial_limit = 10 * 8192;
+  if ((n < host_deep_copy_serial_limit) ||
+      (DefaultHostExecutionSpace().concurrency() == 1)) {
+    std::memcpy(dst, src, n);
+    return;
+  }
+
+  // Both src and dst are aligned the same way with respect to 8 byte words
+  if (reinterpret_cast<ptrdiff_t>(src) % 8 ==
+      reinterpret_cast<ptrdiff_t>(dst) % 8) {
+    char* dst_c       = reinterpret_cast<char*>(dst);
+    const char* src_c = reinterpret_cast<const char*>(src);
+    int count         = 0;
+    // get initial bytes copied
+    while (reinterpret_cast<ptrdiff_t>(dst_c) % 8 != 0) {
+      *dst_c = *src_c;
+      dst_c++;
+      src_c++;
+      count++;
+    }
+
+    // copy the bulk of the data
+    double* dst_p       = reinterpret_cast<double*>(dst_c);
+    const double* src_p = reinterpret_cast<const double*>(src_c);
+    Kokkos::parallel_for("Kokkos::Impl::host_space_deepcopy_double",
+                         policy_t(exec, 0, (n - count) / 8),
+                         [=](const ptrdiff_t i) { dst_p[i] = src_p[i]; });
+
+    // get final data copied
+    dst_c += ((n - count) / 8) * 8;
+    src_c += ((n - count) / 8) * 8;
+    char* dst_end = reinterpret_cast<char*>(dst) + n;
+    while (dst_c != dst_end) {
+      *dst_c = *src_c;
+      dst_c++;
+      src_c++;
+    }
+    return;
+  }
+
+  // Both src and dst are aligned the same way with respect to 4 byte words
+  if (reinterpret_cast<ptrdiff_t>(src) % 4 ==
+      reinterpret_cast<ptrdiff_t>(dst) % 4) {
+    char* dst_c       = reinterpret_cast<char*>(dst);
+    const char* src_c = reinterpret_cast<const char*>(src);
+    int count         = 0;
+    // get initial bytes copied
+    while (reinterpret_cast<ptrdiff_t>(dst_c) % 4 != 0) {
+      *dst_c = *src_c;
+      dst_c++;
+      src_c++;
+      count++;
+    }
+
+    // copy the bulk of the data
+    int32_t* dst_p       = reinterpret_cast<int32_t*>(dst_c);
+    const int32_t* src_p = reinterpret_cast<const int32_t*>(src_c);
+    Kokkos::parallel_for("Kokkos::Impl::host_space_deepcopy_int",
+                         policy_t(exec, 0, (n - count) / 4),
+                         [=](const ptrdiff_t i) { dst_p[i] = src_p[i]; });
+
+    // get final data copied
+    dst_c += ((n - count) / 4) * 4;
+    src_c += ((n - count) / 4) * 4;
+    char* dst_end = reinterpret_cast<char*>(dst) + n;
+    while (dst_c != dst_end) {
+      *dst_c = *src_c;
+      dst_c++;
+      src_c++;
+    }
+    return;
+  }
+#endif
+
+  // Src and dst are not aligned the same way; we can only do a byte-wise copy.
+  {
+    char* dst_p       = reinterpret_cast<char*>(dst);
+    const char* src_p = reinterpret_cast<const char*>(src);
+    Kokkos::parallel_for("Kokkos::Impl::host_space_deepcopy_char",
+                         policy_t(exec, 0, n),
+                         [=](const ptrdiff_t i) { dst_p[i] = src_p[i]; });
+  }
+}
+
+}  // namespace Impl
+
+}  // namespace Kokkos
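The copy routine above relies on a co-alignment fact: when src % 8 == dst % 8, copying the head bytes that bring dst onto an 8-byte boundary lands src on an 8-byte boundary at the same time, so the bulk loop may use 8-byte loads and stores on both sides (and likewise for the 4-byte case). A tiny standalone check of that invariant, with an illustrative function name:

#include <cassert>
#include <cstdint>

void co_alignment_invariant(const void* src, void* dst) {
  const auto s = reinterpret_cast<std::uintptr_t>(src);
  const auto d = reinterpret_cast<std::uintptr_t>(dst);
  if (s % 8 == d % 8) {
    const std::uintptr_t head = (8 - d % 8) % 8;  // bytes copied one at a time
    assert((d + head) % 8 == 0);  // dst is now aligned for 8-byte stores
    assert((s + head) % 8 == 0);  // ... and src is aligned for 8-byte loads
  }
}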
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostSpace_deepcopy.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostSpace_deepcopy.hpp
new file mode 100644 (file)
index 0000000..88d3767
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_HOSTSPACE_DEEPCOPY_HPP
+#define KOKKOS_IMPL_HOSTSPACE_DEEPCOPY_HPP
+
+#include <cstdint>
+
+namespace Kokkos {
+
+namespace Impl {
+
+void hostspace_fence(const DefaultHostExecutionSpace& exec);
+
+void hostspace_parallel_deepcopy(void* dst, const void* src, ptrdiff_t n);
+// DeepCopy called with an execution space that can't access HostSpace
+void hostspace_parallel_deepcopy_async(void* dst, const void* src, ptrdiff_t n);
+void hostspace_parallel_deepcopy_async(const DefaultHostExecutionSpace& exec,
+                                       void* dst, const void* src, ptrdiff_t n);
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+#endif  // KOKKOS_IMPL_HOSTSPACE_DEEPCOPY_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostThreadTeam.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostThreadTeam.cpp
new file mode 100644 (file)
index 0000000..1f1acca
--- /dev/null
@@ -0,0 +1,311 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <limits>
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_HostThreadTeam.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_Spinwait.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+void HostThreadTeamData::organize_pool(HostThreadTeamData *members[],
+                                       const int size) {
+  bool ok = true;
+
+  memory_fence();
+
+  // Verify not already a member of a pool:
+  for (int rank = 0; rank < size && ok; ++rank) {
+    ok = (nullptr != members[rank]) &&
+         (nullptr == members[rank]->m_pool_scratch);
+  }
+
+  if (ok) {
+    int64_t *const root_scratch = members[0]->m_scratch;
+
+    for (int i = m_pool_rendezvous; i < m_pool_reduce; ++i) {
+      root_scratch[i] = 0;
+    }
+
+    {
+      HostThreadTeamData **const pool = reinterpret_cast<HostThreadTeamData **>(
+          root_scratch + m_pool_members);
+
+      // team size == 1, league size == pool_size
+
+      for (int rank = 0; rank < size; ++rank) {
+        HostThreadTeamData *const mem = members[rank];
+        mem->m_pool_scratch           = root_scratch;
+        mem->m_team_scratch           = mem->m_scratch;
+        mem->m_pool_rank              = rank;
+        mem->m_pool_size              = size;
+        mem->m_team_base              = rank;
+        mem->m_team_rank              = 0;
+        mem->m_team_size              = 1;
+        mem->m_team_alloc             = 1;
+        mem->m_league_rank            = rank;
+        mem->m_league_size            = size;
+        mem->m_team_rendezvous_step   = 0;
+        pool[rank]                    = mem;
+      }
+    }
+
+    Kokkos::memory_fence();
+  } else {
+    Kokkos::Impl::throw_runtime_exception(
+        "Kokkos::Impl::HostThreadTeamData::organize_pool ERROR pool already "
+        "exists");
+  }
+}
+
+void HostThreadTeamData::disband_pool() {
+  m_work_range.first     = -1;
+  m_work_range.second    = -1;
+  m_pool_scratch         = nullptr;
+  m_team_scratch         = nullptr;
+  m_pool_rank            = 0;
+  m_pool_size            = 1;
+  m_team_base            = 0;
+  m_team_rank            = 0;
+  m_team_size            = 1;
+  m_team_alloc           = 1;
+  m_league_rank          = 0;
+  m_league_size          = 1;
+  m_team_rendezvous_step = 0;
+}
+
+int HostThreadTeamData::organize_team(const int team_size) {
+  // Pool is initialized
+  const bool ok_pool = nullptr != m_pool_scratch;
+
+  // Team is not set
+  const bool ok_team =
+      m_team_scratch == m_scratch && m_team_base == m_pool_rank &&
+      m_team_rank == 0 && m_team_size == 1 && m_team_alloc == 1 &&
+      m_league_rank == m_pool_rank && m_league_size == m_pool_size;
+
+  if (ok_pool && ok_team) {
+    if (team_size <= 0) return 0;  // No teams to organize
+
+    if (team_size == 1) return 1;  // Already organized in teams of one
+
+    HostThreadTeamData *const *const pool =
+        reinterpret_cast<HostThreadTeamData **>(m_pool_scratch +
+                                                m_pool_members);
+
+    // "league_size" in this context is the number of concurrent teams
+    // that the pool can accommodate.  Excess threads are idle.
+    const int league_size     = m_pool_size / team_size;
+    const int team_alloc_size = m_pool_size / league_size;
+    const int team_alloc_rank = m_pool_rank % team_alloc_size;
+    const int league_rank     = m_pool_rank / team_alloc_size;
+    const int team_base_rank  = league_rank * team_alloc_size;
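+    // For example, with m_pool_size == 6 and team_size == 4:
+    // league_size == 1, team_alloc_size == 6, and team_base_rank == 0,
+    // so pool ranks 0..3 become team ranks 0..3 while ranks 4..5 get
+    // m_team_rank == -1 below and idle.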
+
+    m_team_scratch = pool[team_base_rank]->m_scratch;
+    m_team_base    = team_base_rank;
+    // This needs to check for overflow: if m_pool_size % team_alloc_size != 0
+    // there are two corner cases:
+    // (i) if team_alloc_size == team_size there might be a non-full
+    //     zombie team around (for example m_pool_size = 5 and team_size = 2)
+    // (ii) if team_alloc > team_size then the last team might have fewer
+    //      threads than the others
+    m_team_rank = (team_base_rank + team_size <= m_pool_size) &&
+                          (team_alloc_rank < team_size)
+                      ? team_alloc_rank
+                      : -1;
+    m_team_size            = team_size;
+    m_team_alloc           = team_alloc_size;
+    m_league_rank          = league_rank;
+    m_league_size          = league_size;
+    m_team_rendezvous_step = 0;
+
+    if (team_base_rank == m_pool_rank) {
+      // Initialize team's rendezvous memory
+      for (int i = m_team_rendezvous; i < m_pool_reduce; ++i) {
+        m_scratch[i] = 0;
+      }
+      // Make sure team's rendezvous memory initialized
+      // is written before proceeding.
+      Kokkos::memory_fence();
+    }
+
+    // Organizing threads into a team performs a barrier across the
+    // entire pool to ensure proper initialization of the team
+    // rendezvous mechanism before a team rendezvous can be performed.
+
+    if (pool_rendezvous()) {
+      pool_rendezvous_release();
+    }
+  } else {
+    Kokkos::Impl::throw_runtime_exception(
+        "Kokkos::Impl::HostThreadTeamData::organize_team ERROR");
+  }
+
+  return 0 <= m_team_rank;
+}
+
+void HostThreadTeamData::disband_team() {
+  m_team_scratch         = m_scratch;
+  m_team_base            = m_pool_rank;
+  m_team_rank            = 0;
+  m_team_size            = 1;
+  m_team_alloc           = 1;
+  m_league_rank          = m_pool_rank;
+  m_league_size          = m_pool_size;
+  m_team_rendezvous_step = 0;
+}
+
+//----------------------------------------------------------------------------
+
+int HostThreadTeamData::get_work_stealing() noexcept {
+  pair_int_t w(-1, -1);
+
+  // TODO DJS 3-17-2018:
+  // Discover why the work stealing algorithm only works when called
+  // by the master thread of the team.  If we can refactor this section to
+  // remove that requirement we should be able to remove the split_master_wait
+  // behavior in the team and pool rendezvous algorithms
+  if (1 == m_team_size || team_rendezvous()) {
+    // Attempt first from beginning of my work range
+    for (int attempt = m_work_range.first < m_work_range.second; attempt;) {
+      // Query and attempt to update m_work_range
+      //   from: [ w.first     , w.second )
+      //   to:   [ w.first + 1 , w.second ) = w_new
+      //
+      // If w is invalid then this is just a query.
+
+      const pair_int_t w_new(w.first + 1, w.second);
+
+      w = Kokkos::atomic_compare_exchange(&m_work_range, w, w_new);
+
+      if (w.first < w.second) {
+        // m_work_range is viable
+
+        // If steal is successful then don't repeat attempt to steal
+        attempt = !(w_new.first == w.first + 1 && w_new.second == w.second);
+      } else {
+        // m_work_range is not viable
+        w.first  = -1;
+        w.second = -1;
+
+        attempt = 0;
+      }
+    }
+
+    if (w.first == -1 && m_steal_rank != m_pool_rank) {
+      HostThreadTeamData *const *const pool =
+          reinterpret_cast<HostThreadTeamData **>(m_pool_scratch +
+                                                  m_pool_members);
+
+      // Attempt from beginning failed, try to steal from end of neighbor
+
+      pair_int_t volatile *steal_range = &(pool[m_steal_rank]->m_work_range);
+
+      for (int attempt = true; attempt;) {
+        // Query and attempt to update steal_work_range
+        //   from: [ w.first , w.second )
+        //   to:   [ w.first , w.second - 1 ) = w_new
+        //
+        // If w is invalid then this is just a query.
+
+        const pair_int_t w_new(w.first, w.second - 1);
+
+        w = Kokkos::atomic_compare_exchange(steal_range, w, w_new);
+
+        if (w.first < w.second) {
+          // steal_work_range is viable
+
+          // If steal is successful then don't repeat attempt to steal
+          attempt = !(w_new.first == w.first && w_new.second == w.second - 1);
+        } else {
+          // steal_work_range is not viable, move to next member
+          w.first  = -1;
+          w.second = -1;
+
+          // We need to figure out whether the next team is active:
+          // m_steal_rank + m_team_alloc could be the next base_rank to steal
+          // from, but only if there are another m_team_size threads available
+          // so that the base rank has a full team.
+          m_steal_rank =
+              m_steal_rank + m_team_alloc + m_team_size <= m_pool_size
+                  ? m_steal_rank + m_team_alloc
+                  : 0;
+
+          steal_range = &(pool[m_steal_rank]->m_work_range);
+
+          // If tried all other members then don't repeat attempt to steal
+          attempt = m_steal_rank != m_pool_rank;
+        }
+      }
+
+      if (w.first != -1) w.first = w.second - 1;
+    }
+
+    if (1 < m_team_size) {
+      // Must share the work index
+      *reinterpret_cast<int volatile *>(team_reduce()) = w.first;
+
+      team_rendezvous_release();
+    }
+  } else if (1 < m_team_size) {
+    w.first = *reinterpret_cast<int volatile *>(team_reduce());
+  }
+
+  // May exit because we successfully stole work and w is good.
+  // May exit because no work is left to steal and w == (-1,-1).
+
+  return w.first;
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostThreadTeam.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_HostThreadTeam.hpp
new file mode 100644 (file)
index 0000000..7f39f18
--- /dev/null
@@ -0,0 +1,1011 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_HOSTTHREADTEAM_HPP
+#define KOKKOS_IMPL_HOSTTHREADTEAM_HPP
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_Pair.hpp>
+#include <Kokkos_Atomic.hpp>
+#include <Kokkos_ExecPolicy.hpp>
+#include <impl/Kokkos_FunctorAnalysis.hpp>
+#include <impl/Kokkos_HostBarrier.hpp>
+
+#include <limits>     // std::numeric_limits
+#include <algorithm>  // std::max
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class HostExecSpace>
+class HostThreadTeamMember;
+
+class HostThreadTeamData {
+ public:
+  template <class>
+  friend class HostThreadTeamMember;
+
+  // Assume upper bounds on number of threads:
+  //   pool size       <= 1024 threads
+  //   team size       <= 64 threads
+
+  enum : int { max_pool_members = 1024 };
+  enum : int { max_team_members = 64 };
+  enum : int { max_pool_rendezvous = HostBarrier::required_buffer_size };
+  enum : int { max_team_rendezvous = HostBarrier::required_buffer_size };
+
+ private:
+  // per-thread scratch memory buffer chunks:
+  //
+  //   [ pool_members ]     = [ m_pool_members    .. m_pool_rendezvous )
+  //   [ pool_rendezvous ]  = [ m_pool_rendezvous .. m_team_rendezvous )
+  //   [ team_rendezvous ]  = [ m_team_rendezvous .. m_pool_reduce )
+  //   [ pool_reduce ]      = [ m_pool_reduce     .. m_team_reduce )
+  //   [ team_reduce ]      = [ m_team_reduce     .. m_team_shared )
+  //   [ team_shared ]      = [ m_team_shared     .. m_thread_local )
+  //   [ thread_local ]     = [ m_thread_local    .. m_scratch_size )
+
+  enum : int { m_pool_members = 0 };
+  enum : int {
+    m_pool_rendezvous =
+        static_cast<int>(m_pool_members) + static_cast<int>(max_pool_members)
+  };
+  enum : int {
+    m_team_rendezvous = static_cast<int>(m_pool_rendezvous) +
+                        static_cast<int>(max_pool_rendezvous)
+  };
+  enum : int {
+    m_pool_reduce = static_cast<int>(m_team_rendezvous) +
+                    static_cast<int>(max_team_rendezvous)
+  };
+
+  using pair_int_t = Kokkos::pair<int64_t, int64_t>;
+
+  pair_int_t m_work_range;
+  int64_t m_work_end;
+  int64_t* m_scratch;       // per-thread buffer
+  int64_t* m_pool_scratch;  // == pool[0]->m_scratch
+  int64_t* m_team_scratch;  // == pool[ 0 + m_team_base ]->m_scratch
+  int m_pool_rank;
+  int m_pool_size;
+  size_t m_team_reduce;
+  size_t m_team_shared;
+  size_t m_thread_local;
+  size_t m_scratch_size;
+  int m_team_base;
+  int m_team_rank;
+  int m_team_size;
+  int m_team_alloc;
+  int m_league_rank;
+  int m_league_size;
+  int m_work_chunk;
+  int m_steal_rank;  // work stealing rank
+  int mutable m_pool_rendezvous_step;
+  int mutable m_team_rendezvous_step;
+
+  HostThreadTeamData* team_member(int r) const noexcept {
+    return (reinterpret_cast<HostThreadTeamData**>(
+        m_pool_scratch + m_pool_members))[m_team_base + r];
+  }
+
+ public:
+  inline bool team_rendezvous() const noexcept {
+    int* ptr = reinterpret_cast<int*>(m_team_scratch + m_team_rendezvous);
+    HostBarrier::split_arrive(ptr, m_team_size, m_team_rendezvous_step);
+    if (m_team_rank != 0) {
+      HostBarrier::wait(ptr, m_team_size, m_team_rendezvous_step);
+    } else {
+      HostBarrier::split_master_wait(ptr, m_team_size, m_team_rendezvous_step);
+    }
+
+    return m_team_rank == 0;
+  }
+
+  inline bool team_rendezvous(const int source_team_rank) const noexcept {
+    int* ptr = reinterpret_cast<int*>(m_team_scratch + m_team_rendezvous);
+    HostBarrier::split_arrive(ptr, m_team_size, m_team_rendezvous_step);
+    if (m_team_rank != source_team_rank) {
+      HostBarrier::wait(ptr, m_team_size, m_team_rendezvous_step);
+    } else {
+      HostBarrier::split_master_wait(ptr, m_team_size, m_team_rendezvous_step);
+    }
+
+    return (m_team_rank == source_team_rank);
+  }
+
+  inline void team_rendezvous_release() const noexcept {
+    HostBarrier::split_release(
+        reinterpret_cast<int*>(m_team_scratch + m_team_rendezvous), m_team_size,
+        m_team_rendezvous_step);
+  }
+
+  inline int pool_rendezvous() const noexcept {
+    int* ptr = reinterpret_cast<int*>(m_pool_scratch + m_pool_rendezvous);
+    HostBarrier::split_arrive(ptr, m_pool_size, m_pool_rendezvous_step);
+    if (m_pool_rank != 0) {
+      HostBarrier::wait(ptr, m_pool_size, m_pool_rendezvous_step);
+    } else {
+      HostBarrier::split_master_wait(ptr, m_pool_size, m_pool_rendezvous_step);
+    }
+
+    return m_pool_rank == 0;
+  }
+
+  inline void pool_rendezvous_release() const noexcept {
+    HostBarrier::split_release(
+        reinterpret_cast<int*>(m_pool_scratch + m_pool_rendezvous), m_pool_size,
+        m_pool_rendezvous_step);
+  }
+
+  //----------------------------------------
+
+#ifndef KOKKOS_COMPILER_NVHPC  // FIXME_NVHPC bug in NVHPC regarding constexpr
+                               // constructors used in device code
+  constexpr
+#endif
+      HostThreadTeamData() noexcept
+      : m_work_range(-1, -1),
+        m_work_end(0),
+        m_scratch(nullptr),
+        m_pool_scratch(nullptr),
+        m_team_scratch(nullptr),
+        m_pool_rank(0),
+        m_pool_size(1),
+        m_team_reduce(0),
+        m_team_shared(0),
+        m_thread_local(0),
+        m_scratch_size(0),
+        m_team_base(0),
+        m_team_rank(0),
+        m_team_size(1),
+        m_team_alloc(1),
+        m_league_rank(0),
+        m_league_size(1),
+        m_work_chunk(0),
+        m_steal_rank(0),
+        m_pool_rendezvous_step(0),
+        m_team_rendezvous_step(0) {
+  }
+
+  //----------------------------------------
+  // Organize array of members into a pool.
+  // The 0th member is the root of the pool.
+  // Requires: members are not already in a pool.
+  // Requires: called by one thread.
+  // Pool members are ordered as "close" - sorted by NUMA and then CORE
+  // Each thread is its own team with team_size == 1.
+  static void organize_pool(HostThreadTeamData* members[], const int size);
+
+  // Called by each thread within the pool
+  void disband_pool();
+
+  //----------------------------------------
+  // Each thread within a pool organizes itself into a team.
+  // Must be called by all threads of the pool.
+  // Organizing threads into a team performs a barrier across the
+  // entire pool to ensure proper initialization of the team
+  // rendezvous mechanism before a team rendezvous can be performed.
+  //
+  // Return true  if a valid member of a team.
+  // Return false if not a member and thread should be idled.
+  int organize_team(const int team_size);
+
+  // Each thread within a pool disbands itself from current team.
+  // Each thread becomes its own team with team_size == 1.
+  // Must be called by all threads of the pool.
+  void disband_team();
+
+  //----------------------------------------
+
+  constexpr int pool_rank() const { return m_pool_rank; }
+  constexpr int pool_size() const { return m_pool_size; }
+
+  HostThreadTeamData* pool_member(int r) const noexcept {
+    return (reinterpret_cast<HostThreadTeamData**>(m_pool_scratch +
+                                                   m_pool_members))[r];
+  }
+
+  //----------------------------------------
+
+ public:
+  static constexpr size_t align_to_int64(size_t n) {
+    constexpr size_t mask_to_16 = 0x0f;  // align to 16 bytes
+    constexpr size_t shift_to_8 = 3;     // convert byte count to int64_t words
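+    // e.g. align_to_int64(10) pads 10 bytes up to 16 and returns 2,
+    // the padded size measured in int64_t words.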
+    return ((n + mask_to_16) & ~mask_to_16) >> shift_to_8;
+  }
+
+  constexpr size_t pool_reduce_bytes() const {
+    return m_scratch_size ? sizeof(int64_t) * (m_team_reduce - m_pool_reduce)
+                          : 0;
+  }
+
+  constexpr size_t team_reduce_bytes() const {
+    return sizeof(int64_t) * (m_team_shared - m_team_reduce);
+  }
+
+  constexpr size_t team_shared_bytes() const {
+    return sizeof(int64_t) * (m_thread_local - m_team_shared);
+  }
+
+  constexpr size_t thread_local_bytes() const {
+    return sizeof(int64_t) * (m_scratch_size - m_thread_local);
+  }
+
+  constexpr size_t scratch_bytes() const {
+    return sizeof(int64_t) * m_scratch_size;
+  }
+
+  // Memory chunks:
+
+  int64_t* scratch_buffer() const noexcept { return m_scratch; }
+
+  int64_t* pool_reduce() const noexcept {
+    return m_pool_scratch + m_pool_reduce;
+  }
+
+  int64_t* pool_reduce_local() const noexcept {
+    return m_scratch + m_pool_reduce;
+  }
+
+  int64_t* team_reduce() const noexcept {
+    return m_team_scratch + m_team_reduce;
+  }
+
+  int64_t* team_reduce_local() const noexcept {
+    return m_scratch + m_team_reduce;
+  }
+
+  int64_t* team_shared() const noexcept {
+    return m_team_scratch + m_team_shared;
+  }
+
+  int64_t* local_scratch() const noexcept { return m_scratch + m_thread_local; }
+
+  // Given:
+  //   pool_reduce_size  = number of bytes for pool reduce
+  //   team_reduce_size  = number of bytes for team reduce
+  //   team_shared_size  = number of bytes for team shared memory
+  //   thread_local_size = number of bytes for thread local memory
+  // Return:
+  //   total number of bytes that must be allocated
+  static size_t scratch_size(size_t pool_reduce_size, size_t team_reduce_size,
+                             size_t team_shared_size,
+                             size_t thread_local_size) {
+    pool_reduce_size  = align_to_int64(pool_reduce_size);
+    team_reduce_size  = align_to_int64(team_reduce_size);
+    team_shared_size  = align_to_int64(team_shared_size);
+    thread_local_size = align_to_int64(thread_local_size);
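+    // The four sizes are now measured in int64_t words; adding the fixed
+    // header offset m_pool_reduce and scaling by sizeof(int64_t) yields
+    // the required allocation in bytes.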
+
+    const size_t total_bytes =
+        (m_pool_reduce + pool_reduce_size + team_reduce_size +
+         team_shared_size + thread_local_size) *
+        sizeof(int64_t);
+
+    return total_bytes;
+  }
+
+  // Given:
+  //   alloc_ptr         = pointer to allocated memory
+  //   alloc_size        = number of bytes of allocated memory
+  //   pool_reduce_size  = number of bytes for pool reduce/scan operations
+  //   team_reduce_size  = number of bytes for team reduce/scan operations
+  //   team_shared_size  = number of bytes for team-shared memory
+  //   thread_local_size = number of bytes for thread-local memory
+  // Effect:
+  //   assigns the scratch chunk offsets within the allocated buffer
+  void scratch_assign(void* const alloc_ptr, size_t const alloc_size,
+                      int pool_reduce_size, int team_reduce_size,
+                      size_t team_shared_size, size_t /* thread_local_size */) {
+    pool_reduce_size = align_to_int64(pool_reduce_size);
+    team_reduce_size = align_to_int64(team_reduce_size);
+    team_shared_size = align_to_int64(team_shared_size);
+    // thread_local_size = align_to_int64( thread_local_size );
+
+    m_scratch      = static_cast<int64_t*>(alloc_ptr);
+    m_team_reduce  = m_pool_reduce + pool_reduce_size;
+    m_team_shared  = m_team_reduce + team_reduce_size;
+    m_thread_local = m_team_shared + team_shared_size;
+    m_scratch_size = align_to_int64(alloc_size);
+  }
+
+  //----------------------------------------
+  // Get a work index within the range.
+  // First try to steal from the beginning of our own team's partition.
+  // If that fails then try to steal from the end of another team's partition.
+  int get_work_stealing() noexcept;
+
+  //----------------------------------------
+  // Set the initial work partitioning of [ 0 .. length ) among the teams
+  // with granularity of chunk
+
+  void set_work_partition(int64_t const length, int const chunk) noexcept {
+    // Minimum chunk size to ensure that
+    //   m_work_end < std::numeric_limits<int>::max() * m_work_chunk
+
+    int const chunk_min = (length + std::numeric_limits<int>::max()) /
+                          std::numeric_limits<int>::max();
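+    // Note: chunk_min == length / INT_MAX + 1, so the resulting number of
+    // chunks always fits in an int.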
+
+    m_work_end   = length;
+    m_work_chunk = std::max(chunk, chunk_min);
+
+    // Number of work chunks and partitioning of that number:
+    int const num  = (m_work_end + m_work_chunk - 1) / m_work_chunk;
+    int const part = (num + m_league_size - 1) / m_league_size;
+
+    m_work_range.first  = part * m_league_rank;
+    m_work_range.second = m_work_range.first + part;
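+    // For example, with length == 100, chunk == 10, and m_league_size == 4:
+    // num == 10 chunks and part == 3, so league rank r initially owns chunk
+    // indices [ 3*r .. 3*r + 3 ); get_work_partition() later clamps the
+    // corresponding index range to m_work_end.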
+
+    // Steal from next team, round robin
+    // The next team is offset by m_team_alloc if it fits in the pool.
+
+    m_steal_rank = m_team_base + m_team_alloc + m_team_size <= m_pool_size
+                       ? m_team_base + m_team_alloc
+                       : 0;
+  }
+
+  std::pair<int64_t, int64_t> get_work_partition() noexcept {
+    int64_t first  = m_work_range.first;
+    int64_t second = m_work_range.second;
+    first *= m_work_chunk;
+    second *= m_work_chunk;
+    return std::pair<int64_t, int64_t>(
+        first, second < m_work_end ? second : m_work_end);
+  }
+
+  std::pair<int64_t, int64_t> get_work_stealing_chunk() noexcept {
+    std::pair<int64_t, int64_t> x(-1, -1);
+
+    const int i = get_work_stealing();
+
+    if (0 <= i) {
+      x.first  = m_work_chunk * i;
+      x.second = x.first + m_work_chunk < m_work_end ? x.first + m_work_chunk
+                                                     : m_work_end;
+    }
+
+    return x;
+  }
+};
+
+//----------------------------------------------------------------------------
+
+template <class HostExecSpace>
+class HostThreadTeamMember {
+ public:
+  using scratch_memory_space    = typename HostExecSpace::scratch_memory_space;
+  using execution_space         = HostExecSpace;
+  using thread_team_member      = HostThreadTeamMember;
+  using host_thread_team_member = HostThreadTeamMember;
+
+ private:
+  scratch_memory_space m_scratch;
+  HostThreadTeamData& m_data;
+  int const m_league_rank;
+  int const m_league_size;
+
+ public:
+  constexpr HostThreadTeamMember(HostThreadTeamData& arg_data) noexcept
+      : m_scratch(arg_data.team_shared(), arg_data.team_shared_bytes()),
+        m_data(arg_data),
+        m_league_rank(arg_data.m_league_rank),
+        m_league_size(arg_data.m_league_size) {}
+
+  constexpr HostThreadTeamMember(HostThreadTeamData& arg_data,
+                                 int const arg_league_rank,
+                                 int const arg_league_size) noexcept
+      : m_scratch(arg_data.team_shared(), arg_data.team_shared_bytes(),
+                  arg_data.team_shared(), arg_data.team_shared_bytes()),
+        m_data(arg_data),
+        m_league_rank(arg_league_rank),
+        m_league_size(arg_league_size) {}
+
+  ~HostThreadTeamMember()                           = default;
+  HostThreadTeamMember()                            = delete;
+  HostThreadTeamMember(HostThreadTeamMember&&)      = default;
+  HostThreadTeamMember(HostThreadTeamMember const&) = default;
+  HostThreadTeamMember& operator=(HostThreadTeamMember&&) = default;
+  HostThreadTeamMember& operator=(HostThreadTeamMember const&) = default;
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  int team_rank() const noexcept { return m_data.m_team_rank; }
+
+  KOKKOS_INLINE_FUNCTION
+  int team_size() const noexcept { return m_data.m_team_size; }
+
+  KOKKOS_INLINE_FUNCTION
+  int league_rank() const noexcept { return m_league_rank; }
+
+  KOKKOS_INLINE_FUNCTION
+  int league_size() const noexcept { return m_league_size; }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  const scratch_memory_space& team_shmem() const {
+    return m_scratch.set_team_thread_mode(0, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const scratch_memory_space& team_scratch(int) const {
+    return m_scratch.set_team_thread_mode(0, 1, 0);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const scratch_memory_space& thread_scratch(int) const {
+    return m_scratch.set_team_thread_mode(0, m_data.m_team_size,
+                                          m_data.m_team_rank);
+  }
+
+  //--------------------------------------------------------------------------
+  // Team collectives
+  //--------------------------------------------------------------------------
+
+  KOKKOS_INLINE_FUNCTION void team_barrier() const noexcept {
+    KOKKOS_IF_ON_HOST(
+        (if (m_data.team_rendezvous()) { m_data.team_rendezvous_release(); }))
+  }
+
+  //--------------------------------------------------------------------------
+
+  template <typename T>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(T& value,
+                                             const int source_team_rank) const
+      noexcept {
+    KOKKOS_IF_ON_HOST((if (1 < m_data.m_team_size) {
+      T volatile* const shared_value = (T*)m_data.team_reduce();
+
+      // Don't overwrite shared memory until all threads arrive
+
+      if (m_data.team_rendezvous(source_team_rank)) {
+        // All threads have entered 'team_rendezvous'
+        // only this thread returned from 'team_rendezvous'
+        // with a return value of 'true'
+
+        *shared_value = value;
+
+        m_data.team_rendezvous_release();
+        // This thread released all other threads from 'team_rendezvous'
+        // with a return value of 'false'
+      } else {
+        value = *shared_value;
+      }
+    }))
+
+    KOKKOS_IF_ON_DEVICE(((void)value; (void)source_team_rank; Kokkos::abort(
+                             "HostThreadTeamMember team_broadcast\n");))
+  }
+
+  //--------------------------------------------------------------------------
+
+  template <class Closure, typename T>
+  KOKKOS_INLINE_FUNCTION void team_broadcast(Closure const& f, T& value,
+                                             const int source_team_rank) const
+      noexcept {
+    KOKKOS_IF_ON_HOST((
+        T volatile* const shared_value = (T*)m_data.team_reduce();
+
+        // Don't overwrite shared memory until all threads arrive
+
+        if (m_data.team_rendezvous(source_team_rank)) {
+          // All threads have entered 'team_rendezvous'
+          // only this thread returned from 'team_rendezvous'
+          // with a return value of 'true'
+
+          f(value);
+
+          if (1 < m_data.m_team_size) {
+            *shared_value = value;
+          }
+
+          m_data.team_rendezvous_release();
+          // This thread released all other threads from 'team_rendezvous'
+          // with a return value of 'false'
+        } else { value = *shared_value; }))
+
+    KOKKOS_IF_ON_DEVICE(
+        ((void)f; (void)value; (void)source_team_rank;
+         Kokkos::abort("HostThreadTeamMember team_broadcast\n");))
+  }
+
+  //--------------------------------------------------------------------------
+  // team_reduce( Sum(result) );
+  // team_reduce( Min(result) );
+  // team_reduce( Max(result) );
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+  team_reduce(ReducerType const& reducer) const noexcept {
+    team_reduce(reducer, reducer.reference());
+  }
+
+  template <typename ReducerType>
+  KOKKOS_INLINE_FUNCTION std::enable_if_t<is_reducer<ReducerType>::value>
+  team_reduce(ReducerType const& reducer,
+              typename ReducerType::value_type contribution) const noexcept {
+    KOKKOS_IF_ON_HOST((
+        if (1 < m_data.m_team_size) {
+          using value_type = typename ReducerType::value_type;
+
+          if (0 != m_data.m_team_rank) {
+            // Non-root copies to their local buffer:
+            /*reducer.copy( (value_type*) m_data.team_reduce_local()
+                        , reducer.data() );*/
+            *((value_type*)m_data.team_reduce_local()) = contribution;
+          }
+
+          // Root does not overwrite shared memory until all threads arrive
+          // and copy to their local buffer.
+
+          if (m_data.team_rendezvous()) {
+            // All threads have entered 'team_rendezvous'
+            // only this thread returned from 'team_rendezvous'
+            // with a return value of 'true'
+            //
+            // This thread sums contributed values
+            for (int i = 1; i < m_data.m_team_size; ++i) {
+              value_type* const src =
+                  (value_type*)m_data.team_member(i)->team_reduce_local();
+
+              reducer.join(contribution, *src);
+            }
+
+            // Copy result to root member's buffer:
+            // reducer.copy( (value_type*) m_data.team_reduce() , reducer.data()
+            // );
+            *((value_type*)m_data.team_reduce()) = contribution;
+            reducer.reference()                  = contribution;
+            m_data.team_rendezvous_release();
+            // This thread released all other threads from 'team_rendezvous'
+            // with a return value of 'false'
+          } else {
+            // Copy from root member's buffer:
+            reducer.reference() = *((value_type*)m_data.team_reduce());
+          }
+        } else { reducer.reference() = contribution; }))
+
+    KOKKOS_IF_ON_DEVICE(((void)reducer; (void)contribution;
+                         Kokkos::abort("HostThreadTeamMember team_reduce\n");))
+  }
+
+  //--------------------------------------------------------------------------
+
+  template <typename T>
+  KOKKOS_INLINE_FUNCTION T team_scan(T const& value,
+                                     T* const global = nullptr) const noexcept {
+    KOKKOS_IF_ON_HOST((
+        if (0 != m_data.m_team_rank) {
+          // Non-root copies to their local buffer:
+          ((T*)m_data.team_reduce_local())[1] = value;
+        }
+
+        // Root does not overwrite shared memory until all threads arrive
+        // and copy to their local buffer.
+
+        if (m_data.team_rendezvous()) {
+          // All threads have entered 'team_rendezvous'
+          // only this thread returned from 'team_rendezvous'
+          // with a return value of 'true'
+          //
+          // This thread scans contributed values
+
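+          // Each member's team_reduce_local() holds two T slots: slot [1]
+          // is that member's contribution and slot [0] receives its
+          // exclusive prefix, computed by the loop below.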
+          {
+            T* prev = (T*)m_data.team_reduce_local();
+
+            prev[0] = 0;
+            prev[1] = value;
+
+            for (int i = 1; i < m_data.m_team_size; ++i) {
+              T* const ptr = (T*)m_data.team_member(i)->team_reduce_local();
+
+              ptr[0] = prev[0] + prev[1];
+
+              prev = ptr;
+            }
+          }
+
+          // If adding to global value then atomic_fetch_add to that value
+          // and sum previous value to every entry of the scan.
+          if (global) {
+            T* prev = (T*)m_data.team_reduce_local();
+
+            {
+              T* ptr = (T*)m_data.team_member(m_data.m_team_size - 1)
+                           ->team_reduce_local();
+              prev[0] = Kokkos::atomic_fetch_add(global, ptr[0] + ptr[1]);
+            }
+
+            for (int i = 1; i < m_data.m_team_size; ++i) {
+              T* ptr = (T*)m_data.team_member(i)->team_reduce_local();
+              ptr[0] += prev[0];
+            }
+          }
+
+          m_data.team_rendezvous_release();
+        }
+
+        return ((T*)m_data.team_reduce_local())[0];))
+
+    KOKKOS_IF_ON_DEVICE(((void)value; (void)global;
+                         Kokkos::abort("HostThreadTeamMember team_scan\n");
+                         return T();))
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+template <typename iType, typename Member>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<iType, Member>
+TeamThreadRange(
+    Member const& member, iType count,
+    std::enable_if_t<Impl::is_thread_team_member<Member>::value> const** =
+        nullptr) {
+  return Impl::TeamThreadRangeBoundariesStruct<iType, Member>(member, 0, count);
+}
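+// (The unused "const**" defaulted argument is a SFINAE guard: these
+// overloads participate only when Member satisfies
+// Impl::is_thread_team_member.)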
+
+template <typename iType1, typename iType2, typename Member>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Member>
+TeamThreadRange(
+    Member const& member, iType1 begin, iType2 end,
+    std::enable_if_t<Impl::is_thread_team_member<Member>::value> const** =
+        nullptr) {
+  return Impl::TeamThreadRangeBoundariesStruct<
+      std::common_type_t<iType1, iType2>, Member>(member, begin, end);
+}
+
+template <typename iType, typename Member>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<iType, Member>
+TeamVectorRange(
+    Member const& member, iType count,
+    std::enable_if_t<Impl::is_thread_team_member<Member>::value> const** =
+        nullptr) {
+  return Impl::TeamThreadRangeBoundariesStruct<iType, Member>(member, 0, count);
+}
+
+template <typename iType1, typename iType2, typename Member>
+KOKKOS_INLINE_FUNCTION Impl::TeamThreadRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Member>
+TeamVectorRange(
+    Member const& member, iType1 begin, iType2 end,
+    std::enable_if_t<Impl::is_thread_team_member<Member>::value> const** =
+        nullptr) {
+  return Impl::TeamThreadRangeBoundariesStruct<
+      std::common_type_t<iType1, iType2>, Member>(member, begin, end);
+}
+
+template <typename iType, typename Member>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<iType, Member>
+ThreadVectorRange(
+    Member const& member, iType count,
+    std::enable_if_t<Impl::is_thread_team_member<Member>::value> const** =
+        nullptr) {
+  return Impl::ThreadVectorRangeBoundariesStruct<iType, Member>(member, count);
+}
+
+template <typename iType1, typename iType2, typename Member>
+KOKKOS_INLINE_FUNCTION Impl::ThreadVectorRangeBoundariesStruct<
+    std::common_type_t<iType1, iType2>, Member>
+ThreadVectorRange(
+    Member const& member, iType1 arg_begin, iType2 arg_end,
+    std::enable_if_t<Impl::is_thread_team_member<Member>::value> const** =
+        nullptr) {
+  using iType = std::common_type_t<iType1, iType2>;
+  return Impl::ThreadVectorRangeBoundariesStruct<iType, Member>(
+      member, iType(arg_begin), iType(arg_end));
+}
+
+//----------------------------------------------------------------------------
+/** \brief  Inter-thread parallel_for.
+ *
+ * Executes lambda(iType i) for each i=[0..N)
+ *
+ * The range [0..N) is mapped to all threads of the calling thread team.
+ */
+template <typename iType, class Closure, class Member>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    Impl::TeamThreadRangeBoundariesStruct<iType, Member> const& loop_boundaries,
+    Closure const& closure,
+    std::enable_if_t<Impl::is_host_thread_team_member<Member>::value> const** =
+        nullptr) {
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    closure(i);
+  }
+}
+
+template <typename iType, class Closure, class Member>
+KOKKOS_INLINE_FUNCTION void parallel_for(
+    Impl::ThreadVectorRangeBoundariesStruct<iType, Member> const&
+        loop_boundaries,
+    Closure const& closure,
+    std::enable_if_t<Impl::is_host_thread_team_member<Member>::value> const** =
+        nullptr) {
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    closure(i);
+  }
+}
+
+//----------------------------------------------------------------------------
+
+template <typename iType, class Closure, class Reducer, class Member>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<Kokkos::is_reducer<Reducer>::value &&
+                     Impl::is_host_thread_team_member<Member>::value>
+    parallel_reduce(Impl::TeamThreadRangeBoundariesStruct<iType, Member> const&
+                        loop_boundaries,
+                    Closure const& closure, Reducer const& reducer) {
+  typename Reducer::value_type value;
+  reducer.init(value);
+
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    closure(i, value);
+  }
+
+  loop_boundaries.thread.team_reduce(reducer, value);
+}
+
+template <typename iType, typename Closure, typename ValueType, typename Member>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<!Kokkos::is_reducer<ValueType>::value &&
+                     Impl::is_host_thread_team_member<Member>::value>
+    parallel_reduce(Impl::TeamThreadRangeBoundariesStruct<iType, Member> const&
+                        loop_boundaries,
+                    Closure const& closure, ValueType& result) {
+  ValueType val;
+  Sum<ValueType> reducer(val);
+  reducer.init(val);
+
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    closure(i, reducer.reference());
+  }
+
+  loop_boundaries.thread.team_reduce(reducer);
+  result = reducer.reference();
+}
+
+/*template< typename iType, class Space
+         , class Closure, class Joiner , typename ValueType >
+KOKKOS_INLINE_FUNCTION
+void parallel_reduce
+  (
+Impl::TeamThreadRangeBoundariesStruct<iType,Impl::HostThreadTeamMember<Space> >
+             const & loop_boundaries
+  , Closure  const & closure
+  , Joiner   const & joiner
+  , ValueType      & result
+  )
+{
+  Impl::Reducer< ValueType , Joiner > reducer( joiner , & result );
+
+  reducer.init( reducer.data() );
+
+  for( iType i = loop_boundaries.start
+     ; i <  loop_boundaries.end
+     ; i += loop_boundaries.increment ) {
+    closure( i , reducer.reference() );
+  }
+
+  loop_boundaries.thread.team_reduce( reducer );
+}*/
+
+//----------------------------------------------------------------------------
+/** \brief  Inter-thread vector parallel_reduce.
+ *
+ *  Executes lambda(iType i, ValueType & val) for each i=[0..N)
+ *
+ *  The range [0..N) is mapped to all threads of the
+ *  calling thread team and a summation of val is
+ *  performed and put into result.
+ */
+template <typename iType, class Lambda, typename ValueType, typename Member>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<!Kokkos::is_reducer<ValueType>::value &&
+                     Impl::is_host_thread_team_member<Member>::value>
+    parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
+                        iType, Member>& loop_boundaries,
+                    const Lambda& lambda, ValueType& result) {
+  result = ValueType();
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, result);
+  }
+}
+
+template <typename iType, class Lambda, typename ReducerType, typename Member>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<Kokkos::is_reducer<ReducerType>::value &&
+                     Impl::is_host_thread_team_member<Member>::value>
+    parallel_reduce(const Impl::ThreadVectorRangeBoundariesStruct<
+                        iType, Member>& loop_boundaries,
+                    const Lambda& lambda, const ReducerType& reducer) {
+  reducer.init(reducer.reference());
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, reducer.reference());
+  }
+}
+
+//----------------------------------------------------------------------------
+
+template <typename iType, class Closure, class Member>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<Impl::is_host_thread_team_member<Member>::value>
+    parallel_scan(Impl::TeamThreadRangeBoundariesStruct<iType, Member> const&
+                      loop_boundaries,
+                  Closure const& closure) {
+  // Extract ValueType from the closure
+
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Kokkos::Impl::FunctorPatternInterface::SCAN, void, Closure>::value_type;
+
+  value_type accum = 0;
+
+  // Intra-member scan
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    closure(i, accum, false);
+  }
+
+  // 'accum' output is the exclusive prefix sum
+  accum = loop_boundaries.thread.team_scan(accum);
+
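+  // Second pass: replay the closure with the team-wide exclusive prefix as
+  // the starting offset and final == true so that results are committed.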
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    closure(i, accum, true);
+  }
+}
+
+template <typename iType, class ClosureType, class Member>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<Impl::is_host_thread_team_member<Member>::value>
+    parallel_scan(Impl::ThreadVectorRangeBoundariesStruct<iType, Member> const&
+                      loop_boundaries,
+                  ClosureType const& closure) {
+  using value_type = typename Kokkos::Impl::FunctorAnalysis<
+      Impl::FunctorPatternInterface::SCAN, void, ClosureType>::value_type;
+
+  value_type scan_val = value_type();
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    closure(i, scan_val, true);
+  }
+}
+
+template <typename iType, class Lambda, typename ReducerType, typename Member>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<Kokkos::is_reducer<ReducerType>::value &&
+                     Impl::is_host_thread_team_member<Member>::value>
+    parallel_scan(const Impl::ThreadVectorRangeBoundariesStruct<iType, Member>&
+                      loop_boundaries,
+                  const Lambda& lambda, const ReducerType& reducer) {
+  typename ReducerType::value_type scan_val;
+  reducer.init(scan_val);
+
+#ifdef KOKKOS_ENABLE_PRAGMA_IVDEP
+#pragma ivdep
+#endif
+  for (iType i = loop_boundaries.start; i < loop_boundaries.end;
+       i += loop_boundaries.increment) {
+    lambda(i, scan_val, true);
+  }
+}
+
+//----------------------------------------------------------------------------
+
+template <class Member>
+KOKKOS_INLINE_FUNCTION Impl::ThreadSingleStruct<Member> PerTeam(
+    Member const& member,
+    std::enable_if_t<Impl::is_thread_team_member<Member>::value> const** =
+        nullptr) {
+  return Impl::ThreadSingleStruct<Member>(member);
+}
+
+template <class Member>
+KOKKOS_INLINE_FUNCTION Impl::VectorSingleStruct<Member> PerThread(
+    Member const& member,
+    std::enable_if_t<Impl::is_thread_team_member<Member>::value> const** =
+        nullptr) {
+  return Impl::VectorSingleStruct<Member>(member);
+}
+
+template <class Member, class FunctorType>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<Impl::is_host_thread_team_member<Member>::value>
+    single(const Impl::ThreadSingleStruct<Member>& single,
+           const FunctorType& functor) {
+  // 'single' does not perform a barrier.
+  if (single.team_member.team_rank() == 0) functor();
+}
+
+template <class Member, class FunctorType, typename ValueType>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<Impl::is_host_thread_team_member<Member>::value>
+    single(const Impl::ThreadSingleStruct<Member>& single,
+           const FunctorType& functor, ValueType& val) {
+  single.team_member.team_broadcast(functor, val, 0);
+}
+
+template <class Member, class FunctorType>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<Impl::is_host_thread_team_member<Member>::value>
+    single(const Impl::VectorSingleStruct<Member>&,
+           const FunctorType& functor) {
+  functor();
+}
+
+template <class Member, class FunctorType, typename ValueType>
+KOKKOS_INLINE_FUNCTION
+    std::enable_if_t<Impl::is_host_thread_team_member<Member>::value>
+    single(const Impl::VectorSingleStruct<Member>&, const FunctorType& functor,
+           ValueType& val) {
+  functor(val);
+}
+
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_IMPL_HOSTTHREADTEAM_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_InitializationSettings.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_InitializationSettings.hpp
new file mode 100644 (file)
index 0000000..ceb35f0
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_INITIALIZATION_SETTINGS_HPP
+#define KOKKOS_INITIALIZATION_SETTINGS_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <climits>
+#include <string>
+
+namespace Kokkos {
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+struct InitArguments {
+  int num_threads;
+  int num_numa;
+  int device_id;
+  int ndevices;
+  int skip_device;
+  bool disable_warnings;
+  bool tune_internals;
+  bool tool_help        = false;
+  std::string tool_lib  = {};
+  std::string tool_args = {};
+
+  KOKKOS_DEPRECATED_WITH_COMMENT("Use InitializationSettings instead!")
+  InitArguments(int nt = -1, int nn = -1, int dv = -1, bool dw = false,
+                bool ti = false)
+      : num_threads{nt},
+        num_numa{nn},
+        device_id{dv},
+        ndevices{-1},
+        skip_device{9999},
+        disable_warnings{dw},
+        tune_internals{ti} {}
+};
+#endif
+
+namespace Impl {
+// FIXME_CXX17 replace with std::optional
+template <class>
+struct InitializationSettingsHelper;
+template <>
+struct InitializationSettingsHelper<int> {
+  using value_type   = int;
+  using storage_type = int;
+
+  static constexpr storage_type unspecified = INT_MIN;
+};
+template <>
+struct InitializationSettingsHelper<bool> {
+  using value_type   = bool;
+  using storage_type = char;
+
+  static constexpr storage_type unspecified = CHAR_MAX;
+  static_assert(static_cast<storage_type>(true) != unspecified &&
+                    static_cast<storage_type>(false) != unspecified,
+                "");
+};
+template <>
+struct InitializationSettingsHelper<std::string> {
+  using value_type   = std::string;
+  using storage_type = std::string;
+
+  static storage_type const unspecified;
+};
+}  // namespace Impl
+
+class InitializationSettings {
+#define KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER(NAME) \
+  impl_do_not_use_i_really_mean_it_##NAME##_
+
+#define KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER_TYPE(NAME) impl_##NAME##_type
+
+#define KOKKOS_IMPL_DECLARE(TYPE, NAME)                                      \
+ private:                                                                    \
+  using KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER_TYPE(NAME) = TYPE;                 \
+  Impl::InitializationSettingsHelper<TYPE>::storage_type                     \
+      KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER(NAME) =                              \
+          Impl::InitializationSettingsHelper<TYPE>::unspecified;             \
+                                                                             \
+ public:                                                                     \
+  InitializationSettings& set_##NAME(                                        \
+      Impl::InitializationSettingsHelper<TYPE>::value_type NAME) {           \
+    KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER(NAME) = NAME;                          \
+    return *this;                                                            \
+  }                                                                          \
+  bool has_##NAME() const noexcept {                                         \
+    return KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER(NAME) !=                        \
+           Impl::InitializationSettingsHelper<                               \
+               KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER_TYPE(NAME)>::unspecified;   \
+  }                                                                          \
+  KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER_TYPE(NAME) get_##NAME() const noexcept { \
+    return KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER(NAME);                          \
+  }                                                                          \
+  static_assert(true, "no-op to require trailing semicolon")
+
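+  // For example, KOKKOS_IMPL_DECLARE(int, num_threads) below generates a
+  // private storage member initialized to the "unspecified" sentinel plus
+  // public set_num_threads(int), has_num_threads(), and get_num_threads().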
+ public:
+  KOKKOS_IMPL_DECLARE(int, num_threads);
+  KOKKOS_IMPL_DECLARE(int, device_id);
+  KOKKOS_IMPL_DECLARE(std::string, map_device_id_by);
+  KOKKOS_IMPL_DECLARE(int, num_devices);  // deprecated
+  KOKKOS_IMPL_DECLARE(int, skip_device);  // deprecated
+  KOKKOS_IMPL_DECLARE(bool, disable_warnings);
+  KOKKOS_IMPL_DECLARE(bool, print_configuration);
+  KOKKOS_IMPL_DECLARE(bool, tune_internals);
+  KOKKOS_IMPL_DECLARE(bool, tools_help);
+  KOKKOS_IMPL_DECLARE(std::string, tools_libs);
+  KOKKOS_IMPL_DECLARE(std::string, tools_args);
+
+#undef KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER_TYPE
+#undef KOKKOS_IMPL_INIT_ARGS_DATA_MEMBER
+#undef KOKKOS_IMPL_DECLARE
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+ public:
+  InitializationSettings() = default;
+
+  InitializationSettings(InitArguments const& old) {
+    if (old.num_threads != -1) {
+      set_num_threads(old.num_threads);
+    }
+    if (old.device_id != -1) {
+      set_device_id(old.device_id);
+    }
+    if (old.ndevices != -1) {
+      set_num_devices(old.ndevices);
+    }
+    if (old.skip_device != 9999) {
+      set_skip_device(old.skip_device);
+    }
+    if (old.disable_warnings) {
+      set_disable_warnings(true);
+    }
+    if (old.tune_internals) {
+      set_tune_internals(true);
+    }
+    if (old.tool_help) {
+      set_tools_help(true);
+    }
+    if (!old.tool_lib.empty()) {
+      set_tools_libs(old.tool_lib);
+    }
+    if (!old.tool_args.empty()) {
+      set_tools_args(old.tool_args);
+    }
+  }
+#endif
+};
+
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_LIFO.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_LIFO.hpp
new file mode 100644 (file)
index 0000000..286c567
--- /dev/null
@@ -0,0 +1,406 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// Experimental unified task-data parallel manycore LDRD
+
+#ifndef KOKKOS_IMPL_LIFO_HPP
+#define KOKKOS_IMPL_LIFO_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_TASKDAG
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_PointerOwnership.hpp>
+#include <impl/Kokkos_OptionalRef.hpp>
+#include <impl/Kokkos_Error.hpp>  // KOKKOS_EXPECTS
+#include <impl/Kokkos_LinkedListNode.hpp>
+
+#include <Kokkos_Atomic.hpp>  // atomic_compare_exchange, atomic_fence
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class T>
+struct LockBasedLIFOCommon {
+  using value_type = T;
+
+  using node_type = SimpleSinglyLinkedListNode<>;
+
+  static constexpr uintptr_t LockTag = ~uintptr_t(0);
+  static constexpr uintptr_t EndTag  = ~uintptr_t(1);
+
+  OwningRawPtr<node_type> m_head = reinterpret_cast<node_type*>(EndTag);
+
+  KOKKOS_INLINE_FUNCTION
+  bool _try_push_node(node_type& node) {
+    KOKKOS_EXPECTS(!node.is_enqueued());
+
+    auto* volatile& next = LinkedListNodeAccess::next_ptr(node);
+
+    // store the head of the queue in a local variable
+    auto* old_head = m_head;
+
+    // retry until someone locks the queue or we successfully compare exchange
+    while (old_head != reinterpret_cast<node_type*>(LockTag)) {
+      // TODO @tasking @memory_order DSH this should have a memory order and not
+      // a memory fence
+
+      // set task->next to the head of the queue
+      next = old_head;
+
+      // fence to emulate acquire semantics on next and release semantics on
+      // the store of m_head
+      // Do not proceed until 'next' has been stored.
+      Kokkos::memory_fence();
+
+      // store the old head
+      auto* const old_head_tmp = old_head;
+
+      // attempt to swap task with the old head of the queue
+      // as if this were done atomically:
+      //   if(m_head == old_head) {
+      //     m_head = &node;
+      //   }
+      //   old_head = m_head;
+      old_head = ::Kokkos::atomic_compare_exchange(&m_head, old_head, &node);
+
+      if (old_head_tmp == old_head) return true;
+    }
+
+    // Failed: reset the node's 'next' value, since the node is still
+    // not a member of any queue.
+
+    // TODO @tasking @memory_order DSH this should have a memory order and not a
+    // memory fence
+    LinkedListNodeAccess::mark_as_not_enqueued(node);
+
+    // fence to emulate acquire semantics on next
+    // Do not proceed until 'next' has been stored.
+    ::Kokkos::memory_fence();
+
+    return false;
+  }
+
+  bool _is_empty() const noexcept {
+    // TODO @tasking @memory_order DSH make this an atomic load with memory
+    // order
+    return (volatile node_type*)this->m_head ==
+           reinterpret_cast<node_type*>(EndTag);
+  }
+};
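+
+// A brief summary of the head-pointer encoding used above: m_head stores
+// one of three kinds of values:
+//   EndTag  (~uintptr_t(1)) -- the queue is empty;
+//   LockTag (~uintptr_t(0)) -- the queue is locked by a popping thread;
+//   any other (non-null) value -- a pointer to the first enqueued node.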
+
+//------------------------------------------------------------------------------
+//------------------------------------------------------------------------------
+
+template <class T>
+class LockBasedLIFO : private LockBasedLIFOCommon<T> {
+ private:
+  using base_t    = LockBasedLIFOCommon<T>;
+  using node_type = typename base_t::node_type;
+
+ public:
+  using value_type               = typename base_t::value_type;  // = T
+  using intrusive_node_base_type = SimpleSinglyLinkedListNode<>;
+
+ public:
+  LockBasedLIFO()                     = default;
+  LockBasedLIFO(LockBasedLIFO const&) = delete;
+  LockBasedLIFO(LockBasedLIFO&&)      = delete;
+  LockBasedLIFO& operator=(LockBasedLIFO const&) = delete;
+  LockBasedLIFO& operator=(LockBasedLIFO&&) = delete;
+
+  ~LockBasedLIFO() = default;
+
+  bool empty() const noexcept {
+    // TODO @tasking @memory_order DSH memory order
+    return this->_is_empty();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  OptionalRef<T> pop(bool abort_on_locked = false) {
+    // This assertion lives here to avoid requiring value_type to be
+    // complete until now.
+    static_assert(std::is_base_of<intrusive_node_base_type, value_type>::value,
+                  "Intrusive linked-list value_type must be derived from "
+                  "intrusive_node_base_type");
+
+    // We can't use the static constexpr LockTag directly because
+    // atomic_compare_exchange needs to bind a reference to that, and you
+    // can't do that with static constexpr variables.
+    auto* const lock_tag = (node_type*)base_t::LockTag;
+
+    // TODO @tasking @memory_order DSH shouldn't this be a relaxed atomic load?
+    // start with the return value equal to the head
+    auto* rv = this->m_head;
+
+    // Retry until the lock is acquired or the queue is empty.
+    while (rv != (node_type*)base_t::EndTag) {
+      // The only possible values for the queue are
+      // (1) lock, (2) end, or (3) a valid task.
+      // Thus zero will never appear in the queue.
+      //
+      // If the queue is locked, then just read, by guaranteeing that the
+      // CAS below will fail.
+      KOKKOS_ASSERT(rv != nullptr);
+
+      if (rv == lock_tag) {
+        // TODO @tasking @memory_order DSH this should just be an atomic load
+        // followed by a continue; just set rv to nullptr for now, effectively
+        // turning the atomic_compare_exchange below into a load
+        rv = nullptr;
+        if (abort_on_locked) {
+          break;
+        }
+      }
+
+      auto* const old_rv = rv;
+
+      // TODO @tasking @memory_order DSH this should be a weak compare exchange
+      // in a loop
+      rv = Kokkos::atomic_compare_exchange(&(this->m_head), old_rv, lock_tag);
+
+      if (rv == old_rv) {
+        // CAS succeeded and queue is locked
+        //
+        // This thread has locked the queue and removed 'rv' from the queue.
+        // Extract the next entry of the queue from 'rv->m_next'
+        // and mark 'rv' as popped from a queue by setting
+        // 'rv->m_next = nullptr'.
+        //
+        // Place the next entry in the head of the queue,
+        // which also unlocks the queue.
+        //
+        // This thread has exclusive access to
+        // the queue and the popped task's m_next.
+
+        // TODO @tasking @memory_order DSH check whether the volatile is needed
+        // here
+        auto* volatile& next = LinkedListNodeAccess::next_ptr(*rv);  //->m_next;
+
+        // This algorithm is not lock-free, because an adversarial scheduler
+        // could context-switch this thread at this point, and the rest of
+        // the threads calling this method would never make forward progress.
+
+        // TODO @tasking @memory_order DSH I think this needs to be an atomic
+        // store release (and the memory fence needs to be removed)
+        // TODO @tasking DSH prove that this doesn't need to be a volatile store
+        // Lock is released here
+        this->m_head = next;
+
+        // Mark rv as popped by assigning nullptr to the next
+        LinkedListNodeAccess::mark_as_not_enqueued(*rv);
+
+        Kokkos::memory_fence();
+
+        return OptionalRef<T>{*static_cast<T*>(rv)};
+      }
+
+      // Otherwise, the CAS got a value that didn't match (either because
+      // another thread locked the queue and we observed the lock tag, or
+      // because another thread replaced the head and now we want to try to
+      // lock the queue with that as the popped item). Either way, try again.
+    }
+
+    // Return an empty OptionalRef by calling the default constructor
+    return {};
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  OptionalRef<T> steal() {
+    // TODO @tasking @optimization DSH do this with fewer retries
+    return pop(/* abort_on_locked = */ true);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool push(node_type& node) {
+    while (!this->_try_push_node(node)) { /* retry until success */
+    }
+    // for consistency with push interface on other queue types:
+    return true;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool push(node_type&& node) {
+    // Just forward to the lvalue version
+    return push(node);
+  }
+};
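+
+// Illustrative usage sketch (MyTask is a hypothetical type; it must derive
+// from the intrusive node base, as enforced by the static_assert in pop()):
+//
+//   struct MyTask : SimpleSinglyLinkedListNode<> { int data; };
+//
+//   LockBasedLIFO<MyTask> queue;
+//   MyTask t;
+//   queue.push(t);                          // retries until the CAS succeeds
+//   OptionalRef<MyTask> r = queue.pop();    // locks, unlinks the head, unlocks
+//   if (r) { /* use r->data */ }
+//   OptionalRef<MyTask> s = queue.steal();  // gives up if the queue is locked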
+
+/** @brief A Multiple Producer, Single Consumer Queue with some special
+ * semantics
+ *
+ * This multi-producer, single consumer queue has the following semantics:
+ *
+ *   - Any number of threads may call `try_emplace`/`try_push`
+ *       + These operations are lock-free.
+ *   - Exactly one thread calls `consume()`, and the call occurs exactly once
+ *     in the lifetime of the queue.
+ *       + This operation is lock-free (and wait-free w.r.t. producers)
+ *   - Any calls to `try_push` that happen-before the call to
+ *     `consume()` will succeed and return true, such that the `consume()`
+ *     call will visit that node.
+ *   - Any calls to `try_push` for which the single call to `consume()`
+ *     happens-before those calls will return false, and the node given as
+ *     an argument to `try_push` will not be visited by `consume()`.
+ *
+ *
+ * @tparam T The type of items in the queue
+ *
+ */
+template <class T>
+class SingleConsumeOperationLIFO : private LockBasedLIFOCommon<T> {
+ private:
+  using base_t    = LockBasedLIFOCommon<T>;
+  using node_type = typename base_t::node_type;
+
+  // Allows us to reuse the existing LockBasedLIFOCommon infrastructure by
+  // treating the lock tag as the consumed tag.
+  static constexpr auto ConsumedTag = base_t::LockTag;
+
+ public:
+  using value_type = typename base_t::value_type;  // = T
+
+  KOKKOS_DEFAULTED_FUNCTION
+  SingleConsumeOperationLIFO() noexcept = default;
+
+  SingleConsumeOperationLIFO(SingleConsumeOperationLIFO const&) = delete;
+  SingleConsumeOperationLIFO(SingleConsumeOperationLIFO&&)      = delete;
+  SingleConsumeOperationLIFO& operator=(SingleConsumeOperationLIFO const&) =
+      delete;
+  SingleConsumeOperationLIFO& operator=(SingleConsumeOperationLIFO&&) = delete;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  ~SingleConsumeOperationLIFO() = default;
+
+  KOKKOS_INLINE_FUNCTION
+  bool empty() const noexcept {
+    // TODO @tasking @memory_order DSH memory order
+    return this->_is_empty();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool is_consumed() const noexcept {
+    // TODO @tasking @memory_order DSH memory order?
+    return this->m_head == (node_type*)ConsumedTag;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool try_push(node_type& node) {
+    return this->_try_push_node(node);
+    // Ensures: (return value is true) || (node.is_enqueued() == false);
+  }
+
+  template <class Function>
+  KOKKOS_INLINE_FUNCTION void consume(Function&& f) {
+    auto* const consumed_tag = (node_type*)ConsumedTag;
+
+    // Swap the Consumed tag into the head of the queue:
+
+    // (the local variable below is checked by the assertion and then used
+    // to traverse the consumed list)
+    // TODO @tasking @memory_order DSH this should have memory order release, I
+    // think
+    Kokkos::memory_fence();
+    auto old_head = Kokkos::atomic_exchange(&(this->m_head), consumed_tag);
+
+    // Assert that the queue wasn't consumed before this
+    // This can't be an expects clause because the acquire fence on the read
+    // would be a side-effect
+    KOKKOS_ASSERT(old_head != consumed_tag);
+
+    // We now have exclusive access to the queue; loop over it and call
+    // the user function
+    while (old_head != (node_type*)base_t::EndTag) {
+      // get the Node to make the call with
+      auto* call_arg = old_head;
+
+      // advance the head
+      old_head = LinkedListNodeAccess::next_ptr(*old_head);
+
+      // Mark as popped before proceeding
+      LinkedListNodeAccess::mark_as_not_enqueued(*call_arg);
+
+      // Call the user function
+      auto& arg = *static_cast<T*>(call_arg);
+      f(std::move(arg));
+    }
+  }
+};
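+
+// Illustrative sketch of the single-consume contract (MyTask as in the
+// hypothetical example above):
+//
+//   SingleConsumeOperationLIFO<MyTask> waiting;
+//   // Producers (any number of threads):
+//   if (!waiting.try_push(t)) { /* already consumed; handle t directly */ }
+//   // Consumer (exactly one thread, exactly once):
+//   waiting.consume([](MyTask&& t) { /* every pushed node is visited */ });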
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+struct TaskQueueTraitsLockBased {
+  // TODO @tasking @documentation DSH document what concepts these match
+
+  template <class Task>
+  using ready_queue_type = LockBasedLIFO<Task>;
+
+  template <class Task>
+  using waiting_queue_type = SingleConsumeOperationLIFO<Task>;
+
+  template <class Task>
+  using intrusive_task_base_type =
+      typename ready_queue_type<Task>::intrusive_node_base_type;
+
+  static constexpr auto ready_queue_insertion_may_fail = false;
+};
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* defined KOKKOS_ENABLE_TASKDAG */
+#endif /* #ifndef KOKKOS_IMPL_LIFO_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_LinkedListNode.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_LinkedListNode.hpp
new file mode 100644 (file)
index 0000000..1ed502d
--- /dev/null
@@ -0,0 +1,184 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// Experimental unified task-data parallel manycore LDRD
+
+#ifndef KOKKOS_IMPL_LINKEDLISTNODE_HPP
+#define KOKKOS_IMPL_LINKEDLISTNODE_HPP
+
+#include <Kokkos_Macros.hpp>
+#ifdef KOKKOS_ENABLE_TASKDAG
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_PointerOwnership.hpp>
+#include <impl/Kokkos_OptionalRef.hpp>
+#include <impl/Kokkos_Error.hpp>  // KOKKOS_EXPECTS
+
+#include <Kokkos_Atomic.hpp>  // atomic_compare_exchange, atomic_fence
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+struct LinkedListNodeAccess;
+
+template <uintptr_t NotEnqueuedValue             = 0,
+          template <class> class PointerTemplate = std::add_pointer>
+struct SimpleSinglyLinkedListNode {
+ private:
+  using pointer_type =
+      typename PointerTemplate<SimpleSinglyLinkedListNode>::type;
+
+  pointer_type m_next = reinterpret_cast<pointer_type>(NotEnqueuedValue);
+
+  // These are private because they are an implementation detail of the queue
+  // and should not get added to the value type's interface via the intrusive
+  // wrapper.
+
+  KOKKOS_INLINE_FUNCTION
+  void mark_as_not_enqueued() noexcept {
+    // TODO @tasking @memory_order DSH make this an atomic store with memory
+    // order
+    m_next = (pointer_type)NotEnqueuedValue;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void mark_as_not_enqueued() volatile noexcept {
+    // TODO @tasking @memory_order DSH make this an atomic store with memory
+    // order
+    m_next = (pointer_type)NotEnqueuedValue;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  pointer_type& _next_ptr() noexcept { return m_next; }
+
+  KOKKOS_INLINE_FUNCTION
+  pointer_type volatile& _next_ptr() volatile noexcept { return m_next; }
+
+  KOKKOS_INLINE_FUNCTION
+  pointer_type const& _next_ptr() const noexcept { return m_next; }
+
+  KOKKOS_INLINE_FUNCTION
+  pointer_type const volatile& _next_ptr() const volatile noexcept {
+    return m_next;
+  }
+
+  friend struct LinkedListNodeAccess;
+
+ public:
+  // constexpr
+  KOKKOS_INLINE_FUNCTION
+  bool is_enqueued() const noexcept {
+    // TODO @tasking @memory_order DSH make this an atomic load with memory
+    // order
+    return m_next != reinterpret_cast<pointer_type>(NotEnqueuedValue);
+  }
+
+  // constexpr
+  KOKKOS_INLINE_FUNCTION
+  bool is_enqueued() const volatile noexcept {
+    // TODO @tasking @memory_order DSH make this an atomic load with memory
+    // order
+    return m_next != reinterpret_cast<pointer_type>(NotEnqueuedValue);
+  }
+};
+
+/// Attorney for LinkedListNode, since user types inherit from it
+struct LinkedListNodeAccess {
+  template <class Node>
+  KOKKOS_INLINE_FUNCTION static void mark_as_not_enqueued(Node& node) noexcept {
+    node.mark_as_not_enqueued();
+  }
+
+  template <class Node>
+  KOKKOS_INLINE_FUNCTION static void mark_as_not_enqueued(
+      Node volatile& node) noexcept {
+    node.mark_as_not_enqueued();
+  }
+
+  template <class Node>
+  KOKKOS_INLINE_FUNCTION static typename Node::pointer_type& next_ptr(
+      Node& node) noexcept {
+    return node._next_ptr();
+  }
+
+  template <class Node>
+  KOKKOS_INLINE_FUNCTION static typename Node::pointer_type& next_ptr(
+      Node volatile& node) noexcept {
+    return node._next_ptr();
+  }
+
+  template <class Node>
+  KOKKOS_INLINE_FUNCTION static typename Node::pointer_type& next_ptr(
+      Node const& node) noexcept {
+    return node._next_ptr();
+  }
+
+  template <class Node>
+  KOKKOS_INLINE_FUNCTION static typename Node::pointer_type& prev_ptr(
+      Node& node) noexcept {
+    return node._prev_ptr();
+  }
+
+  template <class Node>
+  KOKKOS_INLINE_FUNCTION static typename Node::pointer_type& prev_ptr(
+      Node const& node) noexcept {
+    return node._prev_ptr();
+  }
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* defined KOKKOS_ENABLE_TASKDAG */
+#endif /* #ifndef KOKKOS_IMPL_LINKEDLISTNODE_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_MemoryPool.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_MemoryPool.cpp
new file mode 100644 (file)
index 0000000..f82e88f
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <impl/Kokkos_Error.hpp>
+
+#include <ostream>
+#include <sstream>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+/* Verify size constraints:
+ *   min_block_alloc_size <= max_block_alloc_size
+ *   max_block_alloc_size <= min_superblock_size
+ *   min_superblock_size  <= max_superblock_size
+ *   min_superblock_size  <= min_total_alloc_size
+ *   min_superblock_size  <= min_block_alloc_size *
+ *                           max_block_per_superblock
+ */
+void memory_pool_bounds_verification(size_t min_block_alloc_size,
+                                     size_t max_block_alloc_size,
+                                     size_t min_superblock_size,
+                                     size_t max_superblock_size,
+                                     size_t max_block_per_superblock,
+                                     size_t min_total_alloc_size) {
+  const size_t max_superblock = min_block_alloc_size * max_block_per_superblock;
+
+  if ((size_t(max_superblock_size) < min_superblock_size) ||
+      (min_total_alloc_size < min_superblock_size) ||
+      (max_superblock < min_superblock_size) ||
+      (min_superblock_size < max_block_alloc_size) ||
+      (max_block_alloc_size < min_block_alloc_size)) {
+    std::ostringstream msg;
+
+    msg << "Kokkos::MemoryPool size constraint violation";
+
+    if (size_t(max_superblock_size) < min_superblock_size) {
+      msg << " : max_superblock_size(" << max_superblock_size
+          << ") < min_superblock_size(" << min_superblock_size << ")";
+    }
+
+    if (min_total_alloc_size < min_superblock_size) {
+      msg << " : min_total_alloc_size(" << min_total_alloc_size
+          << ") < min_superblock_size(" << min_superblock_size << ")";
+    }
+
+    if (max_superblock < min_superblock_size) {
+      msg << " : max_superblock(" << max_superblock
+          << ") < min_superblock_size(" << min_superblock_size << ")";
+    }
+
+    if (min_superblock_size < max_block_alloc_size) {
+      msg << " : min_superblock_size(" << min_superblock_size
+          << ") < max_block_alloc_size(" << max_block_alloc_size << ")";
+    }
+
+    if (max_block_alloc_size < min_block_alloc_size) {
+      msg << " : max_block_alloc_size(" << max_block_alloc_size
+          << ") < min_block_alloc_size(" << min_block_alloc_size << ")";
+    }
+
+    Kokkos::Impl::throw_runtime_exception(msg.str());
+  }
+}
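+
+// For example (illustrative values only): min_block_alloc_size = 64,
+// max_block_per_superblock = 8, and min_superblock_size = 1024 would be
+// rejected above, because 64 * 8 = 512 < 1024, i.e. a superblock could not
+// hold even max_block_per_superblock minimum-size blocks.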
+
+// This has way too many parameters, but it exists entirely to move the
+// iostream inclusion out of the header file with as few changes as possible.
+void _print_memory_pool_state(std::ostream& s, uint32_t const* sb_state_ptr,
+                              int32_t sb_count, uint32_t sb_size_lg2,
+                              uint32_t sb_state_size, uint32_t state_shift,
+                              uint32_t state_used_mask) {
+  s << "pool_size(" << (size_t(sb_count) << sb_size_lg2) << ")"
+    << " superblock_size(" << (1LU << sb_size_lg2) << ")" << std::endl;
+
+  for (int32_t i = 0; i < sb_count; ++i, sb_state_ptr += sb_state_size) {
+    if (*sb_state_ptr) {
+      const uint32_t block_count_lg2 = (*sb_state_ptr) >> state_shift;
+      const uint32_t block_size_lg2  = sb_size_lg2 - block_count_lg2;
+      const uint32_t block_count     = 1u << block_count_lg2;
+      const uint32_t block_used      = (*sb_state_ptr) & state_used_mask;
+
+      s << "Superblock[ " << i << " / " << sb_count << " ] {"
+        << " block_size(" << (1 << block_size_lg2) << ")"
+        << " block_count( " << block_used << " / " << block_count << " )"
+        << std::endl;
+    }
+  }
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_MemoryPoolAllocator.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_MemoryPoolAllocator.hpp
new file mode 100644 (file)
index 0000000..7dede48
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// Experimental unified task-data parallel manycore LDRD
+
+#ifndef KOKKOS_IMPL_MEMORYPOOLALLOCATOR_HPP
+#define KOKKOS_IMPL_MEMORYPOOLALLOCATOR_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Core_fwd.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+namespace Kokkos {
+namespace Impl {
+
+template <class MemoryPool, class T>
+class MemoryPoolAllocator {
+ public:
+  using memory_pool = MemoryPool;
+
+ private:
+  memory_pool m_pool;
+
+ public:
+  KOKKOS_DEFAULTED_FUNCTION
+  MemoryPoolAllocator() = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  MemoryPoolAllocator(MemoryPoolAllocator const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  MemoryPoolAllocator(MemoryPoolAllocator&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  MemoryPoolAllocator& operator=(MemoryPoolAllocator const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  MemoryPoolAllocator& operator=(MemoryPoolAllocator&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION
+  ~MemoryPoolAllocator() = default;
+
+  KOKKOS_INLINE_FUNCTION
+  explicit MemoryPoolAllocator(memory_pool const& arg_pool)
+      : m_pool(arg_pool) {}
+  KOKKOS_INLINE_FUNCTION
+  explicit MemoryPoolAllocator(memory_pool&& arg_pool)
+      : m_pool(std::move(arg_pool)) {}
+
+ public:
+  using value_type      = T;
+  using pointer         = T*;
+  using size_type       = typename MemoryPool::memory_space::size_type;
+  using difference_type = std::make_signed_t<size_type>;
+
+  template <class U>
+  struct rebind {
+    using other = MemoryPoolAllocator<MemoryPool, U>;
+  };
+
+  KOKKOS_INLINE_FUNCTION
+  pointer allocate(size_t n) {
+    void* rv = m_pool.allocate(n * sizeof(T));
+    if (rv == nullptr) {
+      Kokkos::abort("Kokkos MemoryPool allocator failed to allocate memory");
+    }
+    return reinterpret_cast<T*>(rv);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void deallocate(T* ptr, size_t n) { m_pool.deallocate(ptr, n * sizeof(T)); }
+
+  KOKKOS_INLINE_FUNCTION
+  size_type max_size() const { return m_pool.max_block_size(); }
+
+  KOKKOS_INLINE_FUNCTION
+  bool operator==(MemoryPoolAllocator const& other) const {
+    return m_pool == other.m_pool;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool operator!=(MemoryPoolAllocator const& other) const {
+    return !(*this == other);
+  }
+};
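+
+// Illustrative usage sketch (hedged; assumes a memory pool constructed
+// elsewhere for the default execution space):
+//
+//   using pool_t  = Kokkos::MemoryPool<Kokkos::DefaultExecutionSpace>;
+//   using alloc_t = MemoryPoolAllocator<pool_t, int>;
+//   alloc_t alloc(pool);           // pool: a pool_t instance
+//   int* p = alloc.allocate(16);   // aborts (rather than throws) on failure
+//   alloc.deallocate(p, 16);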
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_IMPL_MEMORYPOOLALLOCATOR_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_MemorySpace.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_MemorySpace.cpp
new file mode 100644 (file)
index 0000000..a80ea0a
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//              Copyright (2019) Sandia Corporation
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/** @file Kokkos_MemorySpace.cpp
+ *
+ *  Operations common to memory space instances, or at least default
+ *  implementations thereof.
+ */
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <impl/Kokkos_MemorySpace.hpp>
+
+#include <iostream>
+#include <string>
+#include <sstream>
+
+namespace Kokkos {
+namespace Impl {
+
+void safe_throw_allocation_with_header_failure(
+    std::string const& space_name, std::string const& label,
+    Kokkos::Experimental::RawMemoryAllocationFailure const& failure) {
+  auto generate_failure_message = [&](std::ostream& o) {
+    o << "Kokkos failed to allocate memory for label \"" << label
+      << "\".  Allocation using MemorySpace named \"" << space_name
+      << "\" failed with the following error:  ";
+    failure.print_error_message(o);
+    if (failure.failure_mode() ==
+        Kokkos::Experimental::RawMemoryAllocationFailure::FailureMode::
+            AllocationNotAligned) {
+      // TODO: delete the misaligned memory?
+      o << "Warning: Allocation failed due to misalignment; memory may "
+           "be leaked.\n";
+    }
+    o.flush();
+  };
+  try {
+    std::ostringstream sstr;
+    generate_failure_message(sstr);
+    Kokkos::Impl::throw_runtime_exception(sstr.str());
+  } catch (std::bad_alloc const&) {
+    // Probably failed to allocate the string because we're nearly out of
+    // memory. Try printing to std::cerr instead.
+    try {
+      generate_failure_message(std::cerr);
+    } catch (std::bad_alloc const&) {
+      // oh well, we tried...
+    }
+    Kokkos::Impl::throw_runtime_exception(
+        "Kokkos encountered an allocation failure, then another allocation "
+        "failure while trying to create the error message.");
+  }
+}
+
+}  // end namespace Impl
+}  // end namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_MemorySpace.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_MemorySpace.hpp
new file mode 100644 (file)
index 0000000..dee11bb
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//              Copyright (2019) Sandia Corporation
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/** @file Kokkos_MemorySpace.hpp
+ *
+ *  Operations common to memory space instances, or at least default
+ *  implementations thereof.
+ */
+
+#ifndef KOKKOS_IMPL_MEMORYSPACE_HPP
+#define KOKKOS_IMPL_MEMORYSPACE_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_SharedAlloc.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+#include <string>
+
+namespace Kokkos {
+namespace Impl {
+
+// Defined in implementation file to avoid having to include iostream
+void safe_throw_allocation_with_header_failure(
+    std::string const &space_name, std::string const &label,
+    Kokkos::Experimental::RawMemoryAllocationFailure const &failure);
+
+template <class MemorySpace>
+SharedAllocationHeader *checked_allocation_with_header(MemorySpace const &space,
+                                                       std::string const &label,
+                                                       size_t alloc_size) {
+  try {
+    return reinterpret_cast<SharedAllocationHeader *>(space.allocate(
+        label.c_str(), alloc_size + sizeof(SharedAllocationHeader),
+        alloc_size));
+  } catch (Kokkos::Experimental::RawMemoryAllocationFailure const &failure) {
+    safe_throw_allocation_with_header_failure(space.name(), label, failure);
+  }
+  return nullptr;  // unreachable
+}
+
+template <class ExecutionSpace, class MemorySpace>
+SharedAllocationHeader *checked_allocation_with_header(
+    ExecutionSpace const &exec_space, MemorySpace const &space,
+    std::string const &label, size_t alloc_size) {
+  try {
+    return reinterpret_cast<SharedAllocationHeader *>(space.allocate(
+        exec_space, label.c_str(), alloc_size + sizeof(SharedAllocationHeader),
+        alloc_size));
+  } catch (Kokkos::Experimental::RawMemoryAllocationFailure const &failure) {
+    safe_throw_allocation_with_header_failure(space.name(), label, failure);
+  }
+  return nullptr;  // unreachable
+}
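+
+// In both overloads the allocation is laid out (illustratively) as
+//
+//   [ SharedAllocationHeader | alloc_size bytes of user data ]
+//
+// so the header can later be recovered from the user pointer by stepping
+// back sizeof(SharedAllocationHeader) bytes.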
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_IMPL_MEMORYSPACE_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Memory_Fence.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Memory_Fence.hpp
new file mode 100644 (file)
index 0000000..1df5d13
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact  H. Carter Edwards (hcedwar@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_MEMORY_FENCE_HPP)
+#define KOKKOS_MEMORY_FENCE_HPP
+namespace Kokkos {
+
+//----------------------------------------------------------------------------
+#ifndef KOKKOS_ENABLE_IMPL_DESUL_ATOMICS
+KOKKOS_FORCEINLINE_FUNCTION
+void memory_fence() {
+#if defined(__CUDA_ARCH__)
+  __threadfence();
+#elif defined(KOKKOS_ENABLE_OPENMPTARGET)
+#pragma omp flush
+#elif defined(__HIP_DEVICE_COMPILE__)
+  __threadfence();
+#elif defined(KOKKOS_ENABLE_SYCL) && defined(__SYCL_DEVICE_ONLY__)
+  sycl::atomic_fence(sycl::memory_order::acq_rel, sycl::memory_scope::device);
+#elif defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+  asm volatile("mfence" ::: "memory");
+#elif defined(KOKKOS_ENABLE_GNU_ATOMICS) || \
+    (defined(KOKKOS_COMPILER_NVCC) && defined(KOKKOS_ENABLE_INTEL_ATOMICS))
+  __sync_synchronize();
+#elif defined(KOKKOS_ENABLE_INTEL_ATOMICS)
+  _mm_mfence();
+#elif defined(KOKKOS_ENABLE_OPENMP_ATOMICS)
+#pragma omp flush
+#elif defined(KOKKOS_ENABLE_WINDOWS_ATOMICS)
+  MemoryBarrier();
+#elif !defined(KOKKOS_ENABLE_SERIAL_ATOMICS)
+#error "Error: memory_fence() not defined"
+#endif
+}
+#endif
+
+//////////////////////////////////////////////////////
+// store_fence()
+//
+// If possible, use a store fence on the architecture; if not, run a full
+// memory fence.
+
+KOKKOS_FORCEINLINE_FUNCTION
+void store_fence() {
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+  asm volatile("sfence" ::: "memory");
+#else
+  memory_fence();
+#endif
+}
+
+//////////////////////////////////////////////////////
+// load_fence()
+//
+// If possible, use a load fence on the architecture; if not, run a full
+// memory fence.
+
+KOKKOS_FORCEINLINE_FUNCTION
+void load_fence() {
+#if defined(KOKKOS_ENABLE_ASM) && defined(KOKKOS_ENABLE_ISA_X86_64)
+  asm volatile("lfence" ::: "memory");
+#else
+  memory_fence();
+#endif
+}
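+
+// Illustrative publish/observe sketch using the fences above (hedged;
+// assumes 'ready' is read and written atomically, e.g. via Kokkos atomics):
+//
+//   payload = 42;           // producer
+//   Kokkos::store_fence();  // publish the payload before the flag
+//   ready = 1;
+//
+//   while (!ready) { }      // consumer
+//   Kokkos::load_fence();   // do not read the payload before seeing the flag
+//   use(payload);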
+
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_MultipleTaskQueue.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_MultipleTaskQueue.hpp
new file mode 100644 (file)
index 0000000..209ba19
--- /dev/null
@@ -0,0 +1,529 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_MULTIPLETASKQUEUE_HPP
+#define KOKKOS_IMPL_MULTIPLETASKQUEUE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_TaskScheduler_fwd.hpp>
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_MemoryPool.hpp>
+
+#include <impl/Kokkos_TaskBase.hpp>
+#include <impl/Kokkos_TaskResult.hpp>
+
+#include <impl/Kokkos_TaskQueueMemoryManager.hpp>
+#include <impl/Kokkos_TaskQueueCommon.hpp>
+#include <Kokkos_Atomic.hpp>
+#include <impl/Kokkos_OptionalRef.hpp>
+#include <impl/Kokkos_LIFO.hpp>
+
+#include <string>
+#include <typeinfo>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+// A *non*-concurrent linked list of tasks that failed to be enqueued.
+// (We can't reuse the wait queue for this because that queue's semantics
+// require it to be popped exactly once, and a task that failed to be
+// enqueued has already been marked ready.)
+template <class TaskQueueTraits>
+struct FailedQueueInsertionLinkedListSchedulingInfo {
+  using task_base_type = TaskNode<TaskQueueTraits>;
+  task_base_type* next = nullptr;
+};
+
+struct EmptyTaskSchedulingInfo {};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class ExecSpace, class MemorySpace, class TaskQueueTraits,
+          class MemoryPool>
+class MultipleTaskQueue;
+
+template <class TaskQueueTraits>
+struct MultipleTaskQueueTeamEntry {
+ public:
+  using task_base_type          = TaskNode<TaskQueueTraits>;
+  using runnable_task_base_type = RunnableTaskBase<TaskQueueTraits>;
+  using ready_queue_type =
+      typename TaskQueueTraits::template ready_queue_type<task_base_type>;
+  using task_queue_traits         = TaskQueueTraits;
+  using task_scheduling_info_type = std::conditional_t<
+      TaskQueueTraits::ready_queue_insertion_may_fail,
+      FailedQueueInsertionLinkedListSchedulingInfo<TaskQueueTraits>,
+      EmptyTaskSchedulingInfo>;
+
+ private:
+  // Number of allowed priorities
+  static constexpr int NumPriorities = 3;
+
+  ready_queue_type m_ready_queues[NumPriorities][2];
+
+  task_base_type* m_failed_heads[NumPriorities][2];
+
+  KOKKOS_INLINE_FUNCTION
+  task_base_type*& failed_head_for(runnable_task_base_type const& task) {
+    return m_failed_heads[int(task.get_priority())][int(task.get_task_type())];
+  }
+
+  template <class _always_void = void>
+  KOKKOS_INLINE_FUNCTION OptionalRef<task_base_type> _pop_failed_insertion(
+      int priority, TaskType type,
+      std::enable_if_t<task_queue_traits::ready_queue_insertion_may_fail &&
+                           std::is_void<_always_void>::value,
+                       void*> = nullptr) {
+    auto* rv_ptr = m_failed_heads[priority][(int)type];
+    if (rv_ptr) {
+      m_failed_heads[priority][(int)type] =
+          rv_ptr->as_runnable_task()
+              .template scheduling_info_as<task_scheduling_info_type>()
+              .next;
+      return OptionalRef<task_base_type>{*rv_ptr};
+    } else {
+      return OptionalRef<task_base_type>{nullptr};
+    }
+  }
+
+  template <class _always_void = void>
+  KOKKOS_INLINE_FUNCTION OptionalRef<task_base_type> _pop_failed_insertion(
+      int /*priority*/, TaskType /*type*/,
+      std::enable_if_t<!task_queue_traits::ready_queue_insertion_may_fail &&
+                           std::is_void<_always_void>::value,
+                       void*> = nullptr) {
+    return OptionalRef<task_base_type>{nullptr};
+  }
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  MultipleTaskQueueTeamEntry() {
+    for (int iPriority = 0; iPriority < NumPriorities; ++iPriority) {
+      for (int iType = 0; iType < 2; ++iType) {
+        m_failed_heads[iPriority][iType] = nullptr;
+      }
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  OptionalRef<task_base_type> try_to_steal_ready_task() {
+    auto return_value = OptionalRef<task_base_type>{};
+    // prefer lower priority tasks when stealing
+    for (int i_priority = NumPriorities - 1; i_priority >= 0; --i_priority) {
+      // Check for a single task with this priority
+      return_value = m_ready_queues[i_priority][TaskSingle].steal();
+      if (return_value) return return_value;
+
+      // Check for a team task with this priority
+      return_value = m_ready_queues[i_priority][TaskTeam].steal();
+      if (return_value) return return_value;
+    }
+    return return_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  OptionalRef<task_base_type> pop_ready_task() {
+    auto return_value = OptionalRef<task_base_type>{};
+    for (int i_priority = 0; i_priority < NumPriorities; ++i_priority) {
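+      // Check for a team task with this priority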
+      return_value = _pop_failed_insertion(i_priority, TaskTeam);
+      if (!return_value)
+        return_value = m_ready_queues[i_priority][TaskTeam].pop();
+      if (return_value) return return_value;
+
+      // Check for a single task with this priority
+      return_value = _pop_failed_insertion(i_priority, TaskSingle);
+      if (!return_value)
+        return_value = m_ready_queues[i_priority][TaskSingle].pop();
+      if (return_value) return return_value;
+    }
+    return return_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  ready_queue_type& team_queue_for(runnable_task_base_type const& task) {
+    return m_ready_queues[int(task.get_priority())][int(task.get_task_type())];
+  }
+
+  template <class _always_void = void>
+  KOKKOS_INLINE_FUNCTION void do_handle_failed_insertion(
+      runnable_task_base_type&& task,
+      std::enable_if_t<task_queue_traits::ready_queue_insertion_may_fail &&
+                           std::is_void<_always_void>::value,
+                       void*> = nullptr) {
+    // failed insertions, if they happen, must be from the only thread that
+    // is allowed to push to m_ready_queues, so this linked-list insertion is
+    // not concurrent
+    auto& node  = task.template scheduling_info_as<task_scheduling_info_type>();
+    auto*& head = failed_head_for(task);
+    node.next   = head;
+    head        = &task;
+  }
+
+  template <class _always_void = void>
+  KOKKOS_INLINE_FUNCTION void do_handle_failed_insertion(
+      runnable_task_base_type&& /*task*/,
+      std::enable_if_t<!task_queue_traits::ready_queue_insertion_may_fail &&
+                           std::is_void<_always_void>::value,
+                       void*> = nullptr) {
+    Kokkos::abort("should be unreachable!");
+  }
+
+  template <class _always_void = void>
+  KOKKOS_INLINE_FUNCTION void flush_failed_insertions(
+      int priority, int task_type,
+      std::enable_if_t<
+          task_queue_traits::ready_queue_insertion_may_fail &&
+              std::is_void<_always_void>::value,  // just to make this dependent
+                                                  // on the template parameter
+          int> = 0) {
+    // TODO @tasking @minor DSH this sometimes gets some things out of LIFO
+    // order, which may be undesirable (but not a bug)
+
+    auto*& failed_head = m_failed_heads[priority][task_type];
+    auto& team_queue   = m_ready_queues[priority][task_type];
+
+    while (failed_head != nullptr) {
+      bool success = team_queue.push(*failed_head);
+      if (success) {
+        // Step to the next linked list element
+        failed_head =
+            failed_head->as_runnable_task()
+                .template scheduling_info_as<task_scheduling_info_type>()
+                .next;
+      } else {
+        // no more room, stop traversing and leave the head where it is
+        break;
+      }
+    }
+  }
+
+  template <class _always_void = void>
+  KOKKOS_INLINE_FUNCTION void flush_failed_insertions(
+      int, int,
+      std::enable_if_t<
+          !task_queue_traits::ready_queue_insertion_may_fail &&
+              std::is_void<_always_void>::value,  // just to make this dependent
+                                                  // on the template parameter
+          int> = 0) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void flush_all_failed_insertions() {
+    for (int iPriority = 0; iPriority < NumPriorities; ++iPriority) {
+      flush_failed_insertions(iPriority, (int)TaskType::TaskTeam);
+      flush_failed_insertions(iPriority, (int)TaskType::TaskSingle);
+    }
+  }
+
+  template <class TeamSchedulerInfo, class ExecutionSpace, class MemorySpace,
+            class MemoryPool>
+  KOKKOS_INLINE_FUNCTION void do_schedule_runnable(
+      MultipleTaskQueue<ExecutionSpace, MemorySpace, TaskQueueTraits,
+                        MemoryPool>& queue,
+      RunnableTaskBase<TaskQueueTraits>&& task, TeamSchedulerInfo const& info
+
+  ) {
+    // Push on any nodes that failed to enqueue
+    auto& team_queue = team_queue_for(task);
+    auto priority    = task.get_priority();
+    auto task_type   = task.get_task_type();
+
+    // First schedule the task
+    queue.schedule_runnable_to_queue(std::move(task), team_queue, info);
+
+    // Task may be enqueued and may be run at any point; don't touch it (hence
+    // the use of move semantics)
+    flush_failed_insertions((int)priority, (int)task_type);
+  }
+};
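+
+// Hedged summary of the per-team scheduling flow implemented above:
+//   do_schedule_runnable(): enqueue the task in the matching
+//     [priority][task type] ready queue, then retry any previously failed
+//     insertions for that same bucket.
+//   pop_ready_task(): drain failed-insertion lists before the ready queues,
+//     from priority 0 (highest) downward, team tasks before single tasks.
+//   try_to_steal_ready_task(): the reverse priority order, via steal(), so
+//     thieves prefer lower-priority work.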
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class ExecSpace, class MemorySpace, class TaskQueueTraits,
+          class MemoryPool>
+class MultipleTaskQueue final
+    : public TaskQueueMemoryManager<ExecSpace, MemorySpace, MemoryPool>,
+      public TaskQueueCommonMixin<MultipleTaskQueue<
+          ExecSpace, MemorySpace, TaskQueueTraits, MemoryPool>>,
+      private ObjectWithVLAEmulation<
+          MultipleTaskQueue<ExecSpace, MemorySpace, TaskQueueTraits,
+                            MemoryPool>,
+          MultipleTaskQueueTeamEntry<TaskQueueTraits>> {
+ public:
+  using task_queue_type   = MultipleTaskQueue;  // mark as task_queue concept
+  using task_queue_traits = TaskQueueTraits;
+  using task_base_type    = TaskNode<TaskQueueTraits>;
+  using ready_queue_type =
+      typename TaskQueueTraits::template ready_queue_type<task_base_type>;
+
+ private:
+  using base_t = TaskQueueMemoryManager<ExecSpace, MemorySpace, MemoryPool>;
+  using common_mixin_t       = TaskQueueCommonMixin<MultipleTaskQueue>;
+  using vla_emulation_base_t = ObjectWithVLAEmulation<
+      MultipleTaskQueue<ExecSpace, MemorySpace, TaskQueueTraits, MemoryPool>,
+      MultipleTaskQueueTeamEntry<TaskQueueTraits>>;
+
+  // Allow private inheritance from ObjectWithVLAEmulation
+  friend struct VLAEmulationAccess;
+
+ public:
+  struct SchedulerInfo {
+    using team_queue_id_t                             = int32_t;
+    static constexpr team_queue_id_t NoAssociatedTeam = -1;
+    team_queue_id_t team_association                  = NoAssociatedTeam;
+
+    using scheduler_info_type = SchedulerInfo;
+
+    KOKKOS_INLINE_FUNCTION
+    constexpr explicit SchedulerInfo(team_queue_id_t association) noexcept
+        : team_association(association) {}
+
+    KOKKOS_DEFAULTED_FUNCTION
+    SchedulerInfo() = default;
+
+    KOKKOS_DEFAULTED_FUNCTION
+    SchedulerInfo(SchedulerInfo const&) = default;
+
+    KOKKOS_DEFAULTED_FUNCTION
+    SchedulerInfo(SchedulerInfo&&) = default;
+
+    KOKKOS_DEFAULTED_FUNCTION
+    SchedulerInfo& operator=(SchedulerInfo const&) = default;
+
+    KOKKOS_DEFAULTED_FUNCTION
+    SchedulerInfo& operator=(SchedulerInfo&&) = default;
+
+    KOKKOS_DEFAULTED_FUNCTION
+    ~SchedulerInfo() = default;
+  };
+
+  using task_scheduling_info_type = std::conditional_t<
+      TaskQueueTraits::ready_queue_insertion_may_fail,
+      FailedQueueInsertionLinkedListSchedulingInfo<TaskQueueTraits>,
+      EmptyTaskSchedulingInfo>;
+  using team_scheduler_info_type = SchedulerInfo;
+
+  using runnable_task_base_type = RunnableTaskBase<TaskQueueTraits>;
+
+  template <class Functor, class Scheduler>
+  // requires TaskScheduler<Scheduler> && TaskFunctor<Functor>
+  using runnable_task_type =
+      RunnableTask<task_queue_traits, Scheduler, typename Functor::value_type,
+                   Functor>;
+
+  using aggregate_task_type =
+      AggregateTask<task_queue_traits, task_scheduling_info_type>;
+
+  // Number of allowed priorities
+  static constexpr int NumPriorities = 3;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr typename vla_emulation_base_t::vla_entry_count_type n_queues() const
+      noexcept {
+    return this->n_vla_entries();
+  }
+
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Constructors, destructors, and assignment"> {{{2
+
+  MultipleTaskQueue()                         = delete;
+  MultipleTaskQueue(MultipleTaskQueue const&) = delete;
+  MultipleTaskQueue(MultipleTaskQueue&&)      = delete;
+  MultipleTaskQueue& operator=(MultipleTaskQueue const&) = delete;
+  MultipleTaskQueue& operator=(MultipleTaskQueue&&) = delete;
+
+  MultipleTaskQueue(typename base_t::execution_space const& arg_execution_space,
+                    typename base_t::memory_space const&,
+                    typename base_t::memory_pool const& arg_memory_pool)
+      : base_t(arg_memory_pool),
+        vla_emulation_base_t(
+            Impl::TaskQueueSpecialization<
+                // TODO @tasking @generalization DSH avoid referencing
+                // SimpleTaskScheduler directly?
+                SimpleTaskScheduler<typename base_t::execution_space,
+                                    MultipleTaskQueue>>::
+                get_max_team_count(arg_execution_space)) {}
+
+  // </editor-fold> end Constructors, destructors, and assignment }}}2
+  //----------------------------------------------------------------------------
+
+  KOKKOS_FUNCTION
+  void schedule_runnable(runnable_task_base_type&& task,
+                         team_scheduler_info_type const& info) {
+    auto team_association = info.team_association;
+    // The team association should be unset only if this is a host spawn...
+    if (team_association == team_scheduler_info_type::NoAssociatedTeam) {
+      team_association = 0;
+    }
+    this->vla_value_at(team_association)
+        .do_schedule_runnable(*this, std::move(task), info);
+    // Task may be enqueued and may be run at any point; don't touch it (hence
+    // the use of move semantics)
+  }
+
+  KOKKOS_FUNCTION
+  OptionalRef<task_base_type> pop_ready_task(
+      team_scheduler_info_type const& info) {
+    KOKKOS_EXPECTS(info.team_association !=
+                   team_scheduler_info_type::NoAssociatedTeam);
+
+    auto return_value     = OptionalRef<task_base_type>{};
+    auto team_association = info.team_association;
+
+    // always loop in order of priority first, then prefer team tasks over
+    // single tasks
+    auto& team_queue_info = this->vla_value_at(team_association);
+
+    if (task_queue_traits::ready_queue_insertion_may_fail) {
+      team_queue_info.flush_all_failed_insertions();
+    }
+
+    return_value = team_queue_info.pop_ready_task();
+
+    if (!return_value) {
+      // loop through the rest of the teams and try to steal
+      for (auto isteal = (team_association + 1) % this->n_queues();
+           isteal != team_association;
+           isteal = (isteal + 1) % this->n_queues()) {
+        return_value = this->vla_value_at(isteal).try_to_steal_ready_task();
+        if (return_value) {
+          break;
+        }
+      }
+
+      // Note that this is where we'd update the task's scheduling info
+    }
+    // if nothing was found, return a default-constructed (empty) OptionalRef
+    return return_value;
+  }
+
+  // TODO @tasking @generalization DSH make this a property-based customization
+  // point
+  KOKKOS_INLINE_FUNCTION
+  team_scheduler_info_type initial_team_scheduler_info(int rank_in_league) const
+      noexcept {
+    return team_scheduler_info_type{
+        typename team_scheduler_info_type::team_queue_id_t(rank_in_league %
+                                                           n_queues())};
+  }
+
+  // TODO @tasking @generalization DSH make this a property-based customization
+  // point
+  static /* constexpr */ size_t task_queue_allocation_size(
+      typename base_t::execution_space const& exec_space,
+      typename base_t::memory_space const&,
+      typename base_t::memory_pool const&) {
+    using specialization = Impl::TaskQueueSpecialization<
+        // TODO @tasking @generalization DSH avoid referencing
+        // SimpleTaskScheduler directly?
+        SimpleTaskScheduler<typename base_t::execution_space,
+                            MultipleTaskQueue>>;
+
+    return vla_emulation_base_t::required_allocation_size(
+        /* num_vla_entries = */ specialization::get_max_team_count(exec_space));
+  }
+
+  // Provide a sensible default that can be overridden
+  KOKKOS_INLINE_FUNCTION
+  void update_scheduling_info_from_completed_predecessor(
+      runnable_task_base_type& /*ready_task*/,
+      runnable_task_base_type const& /*predecessor*/) const {
+    // Do nothing; we're using the extra storage for the failure linked list
+  }
+
+  // Provide a sensible default that can be overridden
+  KOKKOS_INLINE_FUNCTION
+  void update_scheduling_info_from_completed_predecessor(
+      aggregate_task_type& /*aggregate*/,
+      runnable_task_base_type const& /*predecessor*/) const {
+    // Do nothing; we're using the extra storage for the failure linked list
+  }
+
+  // Provide a sensible default that can be overridden
+  KOKKOS_INLINE_FUNCTION
+  void update_scheduling_info_from_completed_predecessor(
+      aggregate_task_type& /*aggregate*/,
+      aggregate_task_type const& /*predecessor*/) const {
+    // Do nothing; we're using the extra storage for the failure linked list
+  }
+
+  // Provide a sensible default that can be overridden
+  KOKKOS_INLINE_FUNCTION
+  void update_scheduling_info_from_completed_predecessor(
+      runnable_task_base_type& /*ready_task*/,
+      aggregate_task_type const& /*predecessor*/) const {
+    // Do nothing; we're using the extra storage for the failure linked list
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void handle_failed_ready_queue_insertion(
+      runnable_task_base_type&& task, ready_queue_type&,
+      team_scheduler_info_type const& info) {
+    KOKKOS_EXPECTS(info.team_association !=
+                   team_scheduler_info_type::NoAssociatedTeam);
+
+    this->vla_value_at(info.team_association)
+        .do_handle_failed_insertion(std::move(task));
+  }
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_MULTIPLETASKQUEUE_HPP */
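For orientation: the pop_ready_task stealing loop above visits the other team queues in round-robin order, starting just past the caller's own queue and wrapping around. Below is a minimal standalone sketch of that traversal pattern; the queue type, the integer task payload, and pop_or_steal are illustrative stand-ins, not Kokkos APIs.

#include <array>
#include <deque>
#include <iostream>
#include <optional>

// Hypothetical stand-in for a per-team ready queue of integer "tasks".
using TeamQueue = std::deque<int>;

// Try the caller's own queue first, then steal round-robin from the others,
// mirroring the shape of MultipleTaskQueue::pop_ready_task above.
std::optional<int> pop_or_steal(std::array<TeamQueue, 4>& queues, int my_team) {
  if (!queues[my_team].empty()) {
    int task = queues[my_team].front();
    queues[my_team].pop_front();
    return task;
  }
  const int n = static_cast<int>(queues.size());
  for (int isteal = (my_team + 1) % n; isteal != my_team;
       isteal = (isteal + 1) % n) {
    if (!queues[isteal].empty()) {
      int task = queues[isteal].back();  // steal from the far end
      queues[isteal].pop_back();
      return task;
    }
  }
  return std::nullopt;  // nothing ready anywhere
}

int main() {
  std::array<TeamQueue, 4> queues;
  queues[2].push_back(42);
  if (auto task = pop_or_steal(queues, 0)) std::cout << *task << '\n';  // 42
}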
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_NumericTraits.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_NumericTraits.cpp
new file mode 100644 (file)
index 0000000..5ff0940
--- /dev/null
@@ -0,0 +1,82 @@
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_NumericTraits.hpp>
+
+// NOTE These out-of-class definitions are only required with C++14.  Since
+// C++17, a static data member declared constexpr is implicitly inline.
+
+#if !defined(KOKKOS_ENABLE_CXX17)
+namespace Kokkos {
+namespace Experimental {
+namespace Impl {
+#define OUT_OF_CLASS_DEFINITION_FLOATING_POINT(TRAIT) \
+  constexpr float TRAIT##_helper<float>::value;       \
+  constexpr double TRAIT##_helper<double>::value;     \
+  constexpr long double TRAIT##_helper<long double>::value
+
+#define OUT_OF_CLASS_DEFINITION_INTEGRAL(TRAIT)                         \
+  constexpr bool TRAIT##_helper<bool>::value;                           \
+  constexpr char TRAIT##_helper<char>::value;                           \
+  constexpr signed char TRAIT##_helper<signed char>::value;             \
+  constexpr unsigned char TRAIT##_helper<unsigned char>::value;         \
+  constexpr short TRAIT##_helper<short>::value;                         \
+  constexpr unsigned short TRAIT##_helper<unsigned short>::value;       \
+  constexpr int TRAIT##_helper<int>::value;                             \
+  constexpr unsigned int TRAIT##_helper<unsigned int>::value;           \
+  constexpr long int TRAIT##_helper<long int>::value;                   \
+  constexpr unsigned long int TRAIT##_helper<unsigned long int>::value; \
+  constexpr long long int TRAIT##_helper<long long int>::value;         \
+  constexpr unsigned long long int TRAIT##_helper<unsigned long long int>::value
+
+#define OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(TRAIT) \
+  constexpr int TRAIT##_helper<float>::value;           \
+  constexpr int TRAIT##_helper<double>::value;          \
+  constexpr int TRAIT##_helper<long double>::value
+
+#define OUT_OF_CLASS_DEFINITION_INTEGRAL_2(TRAIT)         \
+  constexpr int TRAIT##_helper<bool>::value;              \
+  constexpr int TRAIT##_helper<char>::value;              \
+  constexpr int TRAIT##_helper<signed char>::value;       \
+  constexpr int TRAIT##_helper<unsigned char>::value;     \
+  constexpr int TRAIT##_helper<short>::value;             \
+  constexpr int TRAIT##_helper<unsigned short>::value;    \
+  constexpr int TRAIT##_helper<int>::value;               \
+  constexpr int TRAIT##_helper<unsigned int>::value;      \
+  constexpr int TRAIT##_helper<long int>::value;          \
+  constexpr int TRAIT##_helper<unsigned long int>::value; \
+  constexpr int TRAIT##_helper<long long int>::value;     \
+  constexpr int TRAIT##_helper<unsigned long long int>::value
+
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT(infinity);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT(epsilon);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT(round_error);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT(norm_min);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT(denorm_min);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT(reciprocal_overflow_threshold);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT(quiet_NaN);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT(signaling_NaN);
+
+OUT_OF_CLASS_DEFINITION_INTEGRAL(finite_min);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT(finite_min);
+OUT_OF_CLASS_DEFINITION_INTEGRAL(finite_max);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT(finite_max);
+
+OUT_OF_CLASS_DEFINITION_INTEGRAL_2(digits);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(digits);
+OUT_OF_CLASS_DEFINITION_INTEGRAL_2(digits10);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(digits10);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(max_digits10);
+OUT_OF_CLASS_DEFINITION_INTEGRAL_2(radix);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(radix);
+
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(min_exponent);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(min_exponent10);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(max_exponent);
+OUT_OF_CLASS_DEFINITION_FLOATING_POINT_2(max_exponent10);
+}  // namespace Impl
+}  // namespace Experimental
+}  // namespace Kokkos
+#endif
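The out-of-class definitions above exist only to satisfy pre-C++17 ODR rules for constexpr static data members. A minimal sketch of the underlying language issue, using an illustrative helper name rather than the actual Kokkos traits:

#include <iostream>

template <class T>
struct finite_max_helper {
  static constexpr T value = T(127);  // in-class declaration + initializer
};

// Pre-C++17, ODR-using the member (e.g. binding it to a reference) requires
// this out-of-class definition; since C++17 a constexpr static data member
// is implicitly inline and the definition below is redundant (but valid).
template <class T>
constexpr T finite_max_helper<T>::value;

int main() {
  const char& r = finite_max_helper<char>::value;  // ODR-use
  std::cout << static_cast<int>(r) << '\n';        // prints 127
}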
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_OptionalRef.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_OptionalRef.hpp
new file mode 100644 (file)
index 0000000..1a3cbab
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// Experimental unified task-data parallel manycore LDRD
+
+#ifndef KOKKOS_IMPL_OPTIONALREF_HPP
+#define KOKKOS_IMPL_OPTIONALREF_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_PointerOwnership.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+namespace Kokkos {
+namespace Impl {
+
+struct InPlaceTag {};
+
+template <class T>
+struct OptionalRef {
+ private:
+  ObservingRawPtr<T> m_value = nullptr;
+
+ public:
+  using value_type = T;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  OptionalRef() = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  OptionalRef(OptionalRef const&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  OptionalRef(OptionalRef&&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  // MSVC requires that this copy assignment operator is not defaulted
+  // if there exists a (non-defaulted) volatile one.
+  OptionalRef& operator=(OptionalRef const& other) noexcept {
+    m_value = other.m_value;
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  // Can't return a reference to volatile OptionalRef, since GCC issues a
+  // warning about reference to volatile not accessing the underlying value
+  void operator=(OptionalRef const volatile& other) volatile noexcept {
+    m_value = other.m_value;
+  }
+
+  KOKKOS_DEFAULTED_FUNCTION
+  OptionalRef& operator=(OptionalRef&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  ~OptionalRef() = default;
+
+  KOKKOS_INLINE_FUNCTION
+  explicit OptionalRef(T& arg_value) : m_value(&arg_value) {}
+
+  KOKKOS_INLINE_FUNCTION
+  explicit OptionalRef(std::nullptr_t) : m_value(nullptr) {}
+
+  KOKKOS_INLINE_FUNCTION
+  OptionalRef& operator=(T& arg_value) {
+    m_value = &arg_value;
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  OptionalRef& operator=(std::nullptr_t) {
+    m_value = nullptr;
+    return *this;
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  OptionalRef<std::add_volatile_t<T>> as_volatile() volatile noexcept {
+    return OptionalRef<std::add_volatile_t<T>>(*(*this));
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  OptionalRef<std::add_volatile_t<std::add_const_t<T>>> as_volatile() const
+      volatile noexcept {
+    return OptionalRef<std::add_volatile_t<std::add_const_t<T>>>(*(*this));
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  T& operator*() & {
+    KOKKOS_EXPECTS(this->has_value());
+    return *m_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  T const& operator*() const& {
+    KOKKOS_EXPECTS(this->has_value());
+    return *m_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  T volatile& operator*() volatile& {
+    KOKKOS_EXPECTS(this->has_value());
+    return *m_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  T const volatile& operator*() const volatile& {
+    KOKKOS_EXPECTS(this->has_value());
+    return *m_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  T&& operator*() && {
+    KOKKOS_EXPECTS(this->has_value());
+    return std::move(*m_value);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  T* operator->() {
+    KOKKOS_EXPECTS(this->has_value());
+    return m_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  T const* operator->() const {
+    KOKKOS_EXPECTS(this->has_value());
+    return m_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  T volatile* operator->() volatile {
+    KOKKOS_EXPECTS(this->has_value());
+    return m_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  T const volatile* operator->() const volatile {
+    KOKKOS_EXPECTS(this->has_value());
+    return m_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  T* get() { return m_value; }
+
+  KOKKOS_INLINE_FUNCTION
+  T const* get() const { return m_value; }
+
+  KOKKOS_INLINE_FUNCTION
+  T volatile* get() volatile { return m_value; }
+
+  KOKKOS_INLINE_FUNCTION
+  T const volatile* get() const volatile { return m_value; }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  operator bool() { return m_value != nullptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  operator bool() const { return m_value != nullptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  operator bool() volatile { return m_value != nullptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  operator bool() const volatile { return m_value != nullptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool has_value() { return m_value != nullptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool has_value() const { return m_value != nullptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool has_value() volatile { return m_value != nullptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  bool has_value() const volatile { return m_value != nullptr; }
+};
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_IMPL_OPTIONALREF_HPP */
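A rough usage sketch of the optional-reference idiom implemented above. The class below is a stripped-down standalone analogue, not the Kokkos class itself; the volatile overloads and KOKKOS_EXPECTS checks are omitted.

#include <cassert>
#include <iostream>

// Stripped-down analogue of Kokkos::Impl::OptionalRef<T>: a non-owning,
// possibly-null reference with pointer-like access.
template <class T>
struct OptionalRefSketch {
  T* m_value = nullptr;

  OptionalRefSketch() = default;
  explicit OptionalRefSketch(T& v) : m_value(&v) {}

  explicit operator bool() const { return m_value != nullptr; }
  bool has_value() const { return m_value != nullptr; }
  T& operator*() const { assert(m_value); return *m_value; }
  T* operator->() const { assert(m_value); return m_value; }
};

int main() {
  int task = 7;
  OptionalRefSketch<int> maybe;  // empty, like a failed pop_ready_task
  if (!maybe) std::cout << "no task\n";
  maybe = OptionalRefSketch<int>(task);    // now refers to `task`
  if (maybe) std::cout << *maybe << '\n';  // prints 7
}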
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ParseCommandLineArgumentsAndEnvironmentVariables.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ParseCommandLineArgumentsAndEnvironmentVariables.hpp
new file mode 100644 (file)
index 0000000..4fdb85b
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_PARSE_COMMAND_LINE_ARGUMENTS_AND_ENVIRONMENT_VARIABLES_HPP
+#define KOKKOS_PARSE_COMMAND_LINE_ARGUMENTS_AND_ENVIRONMENT_VARIABLES_HPP
+
+// These declarations are only provided for testing purposes
+namespace Kokkos {
+class InitializationSettings;
+namespace Impl {
+void parse_command_line_arguments(int& argc, char* argv[],
+                                  InitializationSettings& settings);
+void parse_environment_variables(InitializationSettings& settings);
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
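The parse_command_line_arguments declared above consumes recognized --kokkos-* flags by compacting argv in place. A minimal sketch of that compaction pattern; the flag name and helper below are illustrative, not the Kokkos implementation.

#include <cstring>
#include <iostream>

// Remove every occurrence of `flag` from argv, shifting the remainder left,
// in the same style as Kokkos' command line parsing.  argv has (argc + 1)
// entries, the last one always being nullptr, so the shift keeps it intact.
void remove_flag(int& argc, char* argv[], const char* flag) {
  int iarg = 0;
  while (iarg < argc) {
    if (std::strcmp(argv[iarg], flag) == 0) {
      for (int k = iarg; k < argc; ++k) argv[k] = argv[k + 1];
      --argc;
    } else {
      ++iarg;
    }
  }
}

int main(int argc, char* argv[]) {
  remove_flag(argc, argv, "--kokkos-tools-help");  // illustrative flag
  for (int i = 0; i < argc; ++i) std::cout << argv[i] << '\n';
}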
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_PhysicalLayout.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_PhysicalLayout.hpp
new file mode 100644 (file)
index 0000000..bc0a7df
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_PHYSICAL_LAYOUT_HPP
+#define KOKKOS_PHYSICAL_LAYOUT_HPP
+
+#include <Kokkos_View.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+struct PhysicalLayout {
+  enum LayoutType { Left, Right, Scalar, Error };
+  LayoutType layout_type;
+  int rank;
+  long long int stride[9];  // distance between two neighboring elements in a
+                            // given dimension
+
+  template <class T, class L, class D, class M>
+  PhysicalLayout(const View<T, L, D, M>& view)
+      : layout_type(
+            is_same<typename View<T, L, D, M>::array_layout, LayoutLeft>::value
+                ? Left
+                : (is_same<typename View<T, L, D, M>::array_layout,
+                           LayoutRight>::value
+                       ? Right
+                       : Error)),
+        rank(view.Rank) {
+    for (int i = 0; i < 9; i++) stride[i] = 0;
+    view.stride(stride);
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+#endif
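PhysicalLayout above records one stride per dimension of a View. A standalone sketch of what those strides mean for the two non-error layouts, using plain arrays rather than Kokkos types:

#include <iostream>

int main() {
  // For an array of extents (3, 4):
  //   LayoutLeft  (column-major): stride = {1, 3} -> element (i,j) at i + 3*j
  //   LayoutRight (row-major):    stride = {4, 1} -> element (i,j) at 4*i + j
  const long long n0 = 3, n1 = 4;
  const long long left_stride[2]  = {1, n0};
  const long long right_stride[2] = {n1, 1};

  const long long i = 2, j = 1;
  std::cout << "LayoutLeft offset:  "
            << i * left_stride[0] + j * left_stride[1] << '\n';    // 5
  std::cout << "LayoutRight offset: "
            << i * right_stride[0] + j * right_stride[1] << '\n';  // 9
}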
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling.cpp
new file mode 100644 (file)
index 0000000..480b1a3
--- /dev/null
@@ -0,0 +1,1296 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#ifndef KOKKOS_TOOLS_INDEPENDENT_BUILD
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Tuners.hpp>
+#endif
+
+#include <impl/Kokkos_Profiling.hpp>
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <impl/Kokkos_Command_Line_Parsing.hpp>
+
+#if defined(KOKKOS_ENABLE_LIBDL) || defined(KOKKOS_TOOLS_INDEPENDENT_BUILD)
+#include <dlfcn.h>
+#define KOKKOS_TOOLS_ENABLE_LIBDL
+#endif
+
+#include <algorithm>
+#include <array>
+#include <cstring>
+#include <iostream>
+#include <memory>
+#include <stack>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+#include <sstream>
+#include <iostream>
+
+namespace {
+void warn_cmd_line_arg_ignored_when_kokkos_tools_disabled(char const* arg) {
+#ifndef KOKKOS_TOOLS_ENABLE_LIBDL
+  if (Kokkos::show_warnings()) {
+    std::cerr << "Warning: command line argument '" << arg
+              << "' ignored because kokkos-tools is disabled."
+              << " Raised by Kokkos::initialize()." << std::endl;
+  }
+#else
+  (void)arg;
+#endif
+}
+void warn_env_var_ignored_when_kokkos_tools_disabled(char const* env_var,
+                                                     char const* val) {
+#ifndef KOKKOS_TOOLS_ENABLE_LIBDL
+  if (Kokkos::show_warnings()) {
+    std::cerr << "Warning: environment variable '" << env_var << "=" << val
+              << "' ignored because kokkos-tools is disabled."
+              << " Raised by Kokkos::initialize()." << std::endl;
+  }
+#else
+  (void)env_var;
+  (void)val;
+#endif
+}
+}  // namespace
+
+namespace Kokkos {
+
+namespace Tools {
+
+const std::string InitArguments::unset_string_option = {
+    "kokkos_tools_impl_unset_option"};
+
+InitArguments tool_arguments;
+
+namespace Impl {
+void parse_command_line_arguments(int& argc, char* argv[],
+                                  InitArguments& arguments) {
+  int iarg = 0;
+  using Kokkos::Impl::check_arg;
+  using Kokkos::Impl::check_arg_str;
+
+  auto& libs = arguments.lib;
+  auto& args = arguments.args;
+  auto& help = arguments.help;
+  while (iarg < argc) {
+    bool remove_flag = false;
+    if (check_arg_str(argv[iarg], "--kokkos-tools-libs", libs) ||
+        check_arg_str(argv[iarg], "--kokkos-tools-library", libs)) {
+      if (check_arg(argv[iarg], "--kokkos-tools-library")) {
+        using Kokkos::Impl::warn_deprecated_command_line_argument;
+        warn_deprecated_command_line_argument("--kokkos-tools-library",
+                                              "--kokkos-tools-libs");
+      }
+      warn_cmd_line_arg_ignored_when_kokkos_tools_disabled(argv[iarg]);
+      remove_flag = true;
+    } else if (check_arg_str(argv[iarg], "--kokkos-tools-args", args)) {
+      warn_cmd_line_arg_ignored_when_kokkos_tools_disabled(argv[iarg]);
+      remove_flag = true;
+      // strip any leading and/or trailing quotes if they were retained in the
+      // string because this will very likely cause parsing issues for tools.
+      // If the quotes are retained (via bypassing the shell):
+      //    <EXE> --kokkos-tools-args="-c my example"
+      // would be tokenized as:
+      //    "<EXE>" "\"-c" "my" "example\""
+      // instead of:
+      //    "<EXE>" "-c" "my" "example"
+      if (!args.empty()) {
+        if (args.front() == '"') args = args.substr(1);
+        if (args.back() == '"') args = args.substr(0, args.length() - 1);
+      }
+      // add the name of the executable to the beginning
+      if (argc > 0) args = std::string(argv[0]) + " " + args;
+    } else if (check_arg(argv[iarg], "--kokkos-tools-help")) {
+      help = InitArguments::PossiblyUnsetOption::on;
+      warn_cmd_line_arg_ignored_when_kokkos_tools_disabled(argv[iarg]);
+      remove_flag = true;
+    } else if (std::regex_match(argv[iarg], std::regex("-?-kokkos-tool.*",
+                                                       std::regex::egrep))) {
+      std::cerr << "Warning: command line argument '" << argv[iarg]
+                << "' is not recognized."
+                << " Raised by Kokkos::initialize()." << std::endl;
+    }
+    if (remove_flag) {
+      // Shift the remainder of the argv list by one.  Note that argv has
+      // (argc + 1) arguments, the last one always being nullptr.  The following
+      // loop moves the trailing nullptr element as well
+      for (int k = iarg; k < argc; ++k) {
+        argv[k] = argv[k + 1];
+      }
+      argc--;
+    } else {
+      iarg++;
+    }
+    if ((args == Kokkos::Tools::InitArguments::unset_string_option) && argc > 0)
+      args = argv[0];
+  }
+}
+Kokkos::Tools::Impl::InitializationStatus parse_environment_variables(
+    InitArguments& arguments) {
+  auto& libs               = arguments.lib;
+  auto& args               = arguments.args;
+  auto env_profile_library = std::getenv("KOKKOS_PROFILE_LIBRARY");
+  if (env_profile_library != nullptr) {
+    using Kokkos::Impl::warn_deprecated_environment_variable;
+    warn_deprecated_environment_variable("KOKKOS_PROFILE_LIBRARY",
+                                         "KOKKOS_TOOLS_LIBS");
+    warn_env_var_ignored_when_kokkos_tools_disabled("KOKKOS_PROFILE_LIBRARY",
+                                                    env_profile_library);
+    libs = env_profile_library;
+  }
+  auto env_tools_libs = std::getenv("KOKKOS_TOOLS_LIBS");
+  if (env_tools_libs != nullptr) {
+    warn_env_var_ignored_when_kokkos_tools_disabled("KOKKOS_TOOLS_LIBS",
+                                                    env_tools_libs);
+    if (env_profile_library != nullptr && libs != env_tools_libs) {
+      std::stringstream ss;
+      ss << "Error: environment variables 'KOKKOS_PROFILE_LIBRARY="
+         << env_profile_library << "' and 'KOKKOS_TOOLS_LIBS=" << env_tools_libs
+         << "' are both set and do not match."
+         << " Raised by Kokkos::initialize().\n";
+      Kokkos::abort(ss.str().c_str());
+    }
+    libs = env_tools_libs;
+  }
+  auto env_tools_args = std::getenv("KOKKOS_TOOLS_ARGS");
+  if (env_tools_args != nullptr) {
+    warn_env_var_ignored_when_kokkos_tools_disabled("KOKKOS_TOOLS_ARGS",
+                                                    env_tools_args);
+    args = env_tools_args;
+  }
+  return {
+      Kokkos::Tools::Impl::InitializationStatus::InitializationResult::success};
+}
+InitializationStatus initialize_tools_subsystem(
+    const Kokkos::Tools::InitArguments& args) {
+#ifdef KOKKOS_TOOLS_ENABLE_LIBDL
+  Kokkos::Profiling::initialize(args.lib);
+  auto final_args =
+      (args.args != Kokkos::Tools::InitArguments::unset_string_option)
+          ? args.args
+          : "";
+
+  if (args.help) {
+    if (!Kokkos::Tools::printHelp(final_args)) {
+      std::cerr << "Tool has not provided a help message" << std::endl;
+    }
+    return {InitializationStatus::InitializationResult::help_request};
+  }
+  Kokkos::Tools::parseArgs(final_args);
+#else
+  (void)args;
+#endif
+  return {InitializationStatus::InitializationResult::success};
+}
+
+}  // namespace Impl
+void initialize(const InitArguments& arguments) {
+  Impl::initialize_tools_subsystem(arguments);
+}
+void initialize(int argc, char* argv[]) {
+  InitArguments arguments;
+  Impl::parse_environment_variables(arguments);
+  Impl::parse_command_line_arguments(argc, argv, arguments);
+  initialize(arguments);
+}
+
+namespace Experimental {
+
+namespace Impl {
+void tool_invoked_fence(const uint32_t /* devID */) {
+  /**
+   * Currently the function ignores the device ID.
+   * Eventually we want to support fencing only
+   * a given stream/resource.
+   */
+#ifndef KOKKOS_TOOLS_INDEPENDENT_BUILD
+  Kokkos::fence(
+      "Kokkos::Tools::Experimental::Impl::tool_invoked_fence: Tool Requested "
+      "Fence");
+#endif
+}
+}  // namespace Impl
+
+#ifdef KOKKOS_ENABLE_TUNING
+static size_t kernel_name_context_variable_id;
+static size_t kernel_type_context_variable_id;
+static std::unordered_map<size_t, std::unordered_set<size_t>>
+    features_per_context;
+static std::unordered_set<size_t> active_features;
+static std::unordered_map<size_t, VariableValue> feature_values;
+static std::unordered_map<size_t, VariableInfo> variable_metadata;
+#endif
+static EventSet current_callbacks;
+static EventSet backup_callbacks;
+static EventSet no_profiling;
+static ToolSettings tool_requirements;
+bool eventSetsEqual(const EventSet& l, const EventSet& r) {
+  return l.init == r.init && l.finalize == r.finalize &&
+         l.parse_args == r.parse_args && l.print_help == r.print_help &&
+         l.begin_parallel_for == r.begin_parallel_for &&
+         l.end_parallel_for == r.end_parallel_for &&
+         l.begin_parallel_reduce == r.begin_parallel_reduce &&
+         l.end_parallel_reduce == r.end_parallel_reduce &&
+         l.begin_parallel_scan == r.begin_parallel_scan &&
+         l.end_parallel_scan == r.end_parallel_scan &&
+         l.push_region == r.push_region && l.pop_region == r.pop_region &&
+         l.allocate_data == r.allocate_data &&
+         l.deallocate_data == r.deallocate_data &&
+         l.create_profile_section == r.create_profile_section &&
+         l.start_profile_section == r.start_profile_section &&
+         l.stop_profile_section == r.stop_profile_section &&
+         l.destroy_profile_section == r.destroy_profile_section &&
+         l.profile_event == r.profile_event &&
+         l.begin_deep_copy == r.begin_deep_copy &&
+         l.end_deep_copy == r.end_deep_copy && l.begin_fence == r.begin_fence &&
+         l.end_fence == r.end_fence && l.sync_dual_view == r.sync_dual_view &&
+         l.modify_dual_view == r.modify_dual_view &&
+         l.declare_metadata == r.declare_metadata &&
+         l.request_tool_settings == r.request_tool_settings &&
+         l.provide_tool_programming_interface ==
+             r.provide_tool_programming_interface &&
+         l.declare_input_type == r.declare_input_type &&
+         l.declare_output_type == r.declare_output_type &&
+         l.end_tuning_context == r.end_tuning_context &&
+         l.begin_tuning_context == r.begin_tuning_context &&
+         l.request_output_values == r.request_output_values &&
+         l.declare_optimization_goal == r.declare_optimization_goal;
+}
+enum class MayRequireGlobalFencing : bool { No, Yes };
+template <typename Callback, typename... Args>
+inline void invoke_kokkosp_callback(
+    MayRequireGlobalFencing may_require_global_fencing,
+    const Callback& callback, Args&&... args) {
+  if (callback != nullptr) {
+    // Two-clause condition: fence only if this callback may ever require a
+    // fence AND the tool requires global fencing (default true, but tools
+    // can override it).
+    if (may_require_global_fencing == MayRequireGlobalFencing::Yes &&
+        (tool_requirements.requires_global_fencing)) {
+#ifndef KOKKOS_TOOLS_INDEPENDENT_BUILD
+      Kokkos::fence(
+          "Kokkos::Tools::invoke_kokkosp_callback: Kokkos Profile Tool Fence");
+#endif
+    }
+    (*callback)(std::forward<Args>(args)...);
+  }
+}
+}  // namespace Experimental
+bool profileLibraryLoaded() {
+  return !Experimental::eventSetsEqual(Experimental::current_callbacks,
+                                       Experimental::no_profiling);
+}
+
+void beginParallelFor(const std::string& kernelPrefix, const uint32_t devID,
+                      uint64_t* kernelID) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::Yes,
+      Experimental::current_callbacks.begin_parallel_for, kernelPrefix.c_str(),
+      devID, kernelID);
+#ifdef KOKKOS_ENABLE_TUNING
+  if (Kokkos::tune_internals()) {
+    auto context_id = Experimental::get_new_context_id();
+    Experimental::begin_context(context_id);
+    Experimental::VariableValue contextValues[] = {
+        Experimental::make_variable_value(
+            Experimental::kernel_name_context_variable_id, kernelPrefix),
+        Experimental::make_variable_value(
+            Experimental::kernel_type_context_variable_id, "parallel_for")};
+    Experimental::set_input_values(context_id, 2, contextValues);
+  }
+#endif
+}
+
+void endParallelFor(const uint64_t kernelID) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::Yes,
+      Experimental::current_callbacks.end_parallel_for, kernelID);
+#ifdef KOKKOS_ENABLE_TUNING
+  if (Kokkos::tune_internals()) {
+    Experimental::end_context(Experimental::get_current_context_id());
+  }
+#endif
+}
+
+void beginParallelScan(const std::string& kernelPrefix, const uint32_t devID,
+                       uint64_t* kernelID) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::Yes,
+      Experimental::current_callbacks.begin_parallel_scan, kernelPrefix.c_str(),
+      devID, kernelID);
+#ifdef KOKKOS_ENABLE_TUNING
+  if (Kokkos::tune_internals()) {
+    auto context_id = Experimental::get_new_context_id();
+    Experimental::begin_context(context_id);
+    Experimental::VariableValue contextValues[] = {
+        Experimental::make_variable_value(
+            Experimental::kernel_name_context_variable_id, kernelPrefix),
+        Experimental::make_variable_value(
+            Experimental::kernel_type_context_variable_id, "parallel_for")};
+    Experimental::set_input_values(context_id, 2, contextValues);
+  }
+#endif
+}
+
+void endParallelScan(const uint64_t kernelID) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::Yes,
+      Experimental::current_callbacks.end_parallel_scan, kernelID);
+#ifdef KOKKOS_ENABLE_TUNING
+  if (Kokkos::tune_internals()) {
+    Experimental::end_context(Experimental::get_current_context_id());
+  }
+#endif
+}
+
+void beginParallelReduce(const std::string& kernelPrefix, const uint32_t devID,
+                         uint64_t* kernelID) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::Yes,
+      Experimental::current_callbacks.begin_parallel_reduce,
+      kernelPrefix.c_str(), devID, kernelID);
+#ifdef KOKKOS_ENABLE_TUNING
+  if (Kokkos::tune_internals()) {
+    auto context_id = Experimental::get_new_context_id();
+    Experimental::begin_context(context_id);
+    Experimental::VariableValue contextValues[] = {
+        Experimental::make_variable_value(
+            Experimental::kernel_name_context_variable_id, kernelPrefix),
+        Experimental::make_variable_value(
+            Experimental::kernel_type_context_variable_id, "parallel_for")};
+    Experimental::set_input_values(context_id, 2, contextValues);
+  }
+#endif
+}
+
+void endParallelReduce(const uint64_t kernelID) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::Yes,
+      Experimental::current_callbacks.end_parallel_reduce, kernelID);
+#ifdef KOKKOS_ENABLE_TUNING
+  if (Kokkos::tune_internals()) {
+    Experimental::end_context(Experimental::get_current_context_id());
+  }
+#endif
+}
+
+void pushRegion(const std::string& kName) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::Yes,
+      Experimental::current_callbacks.push_region, kName.c_str());
+}
+
+void popRegion() {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::Yes,
+      Experimental::current_callbacks.pop_region);
+}
+
+void allocateData(const SpaceHandle space, const std::string label,
+                  const void* ptr, const uint64_t size) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.allocate_data, space, label.c_str(), ptr,
+      size);
+}
+
+void deallocateData(const SpaceHandle space, const std::string label,
+                    const void* ptr, const uint64_t size) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.deallocate_data, space, label.c_str(),
+      ptr, size);
+}
+
+void beginDeepCopy(const SpaceHandle dst_space, const std::string dst_label,
+                   const void* dst_ptr, const SpaceHandle src_space,
+                   const std::string src_label, const void* src_ptr,
+                   const uint64_t size) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.begin_deep_copy, dst_space,
+      dst_label.c_str(), dst_ptr, src_space, src_label.c_str(), src_ptr, size);
+#ifdef KOKKOS_ENABLE_TUNING
+  if (Experimental::current_callbacks.begin_deep_copy != nullptr) {
+    if (Kokkos::tune_internals()) {
+      auto context_id = Experimental::get_new_context_id();
+      Experimental::begin_context(context_id);
+      Experimental::VariableValue contextValues[] = {
+          Experimental::make_variable_value(
+              Experimental::kernel_name_context_variable_id,
+              "deep_copy_kernel"),
+          Experimental::make_variable_value(
+              Experimental::kernel_type_context_variable_id, "deep_copy")};
+      Experimental::set_input_values(context_id, 2, contextValues);
+    }
+  }
+#endif
+}
+
+void endDeepCopy() {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.end_deep_copy);
+#ifdef KOKKOS_ENABLE_TUNING
+  if (Experimental::current_callbacks.end_deep_copy != nullptr) {
+    if (Kokkos::tune_internals()) {
+      Experimental::end_context(Experimental::get_current_context_id());
+    }
+  }
+#endif
+}
+
+void beginFence(const std::string name, const uint32_t deviceId,
+                uint64_t* handle) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.begin_fence, name.c_str(), deviceId,
+      handle);
+}
+
+void endFence(const uint64_t handle) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.end_fence, handle);
+}
+
+void createProfileSection(const std::string& sectionName, uint32_t* secID) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.create_profile_section,
+      sectionName.c_str(), secID);
+}
+
+void startSection(const uint32_t secID) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.start_profile_section, secID);
+}
+
+void stopSection(const uint32_t secID) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.stop_profile_section, secID);
+}
+
+void destroyProfileSection(const uint32_t secID) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.destroy_profile_section, secID);
+}
+
+void markEvent(const std::string& eventName) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.profile_event, eventName.c_str());
+}
+
+bool printHelp(const std::string& args) {
+  if (Experimental::current_callbacks.print_help == nullptr) {
+    return false;
+  }
+  std::string arg0  = args.substr(0, args.find_first_of(' '));
+  const char* carg0 = arg0.c_str();
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.print_help, const_cast<char*>(carg0));
+  return true;
+}
+
+void parseArgs(int _argc, char** _argv) {
+  if (Experimental::current_callbacks.parse_args != nullptr && _argc > 0) {
+    Experimental::invoke_kokkosp_callback(
+        Experimental::MayRequireGlobalFencing::No,
+        Experimental::current_callbacks.parse_args, _argc, _argv);
+  }
+}
+
+void parseArgs(const std::string& args) {
+  if (Experimental::current_callbacks.parse_args == nullptr) {
+    return;
+  }
+  using strvec_t = std::vector<std::string>;
+  auto tokenize  = [](const std::string& line, const std::string& delimiters) {
+    strvec_t _result{};
+    std::size_t _bidx = 0;  // position that is the beginning of the new string
+    std::size_t _didx = 0;  // position of the delimiter in the string
+    while (_bidx < line.length() && _didx < line.length()) {
+      // find the first character (starting at _didx) that is not a delimiter
+      _bidx = line.find_first_not_of(delimiters, _didx);
+      // if no more non-delimiter chars, done
+      if (_bidx == std::string::npos) break;
+      // starting at the position of the new string, find the next delimiter
+      _didx = line.find_first_of(delimiters, _bidx);
+      // starting at the position of the new string, get the characters
+      // between this position and the next delimiter
+      std::string _tmp = line.substr(_bidx, _didx - _bidx);
+      // don't add empty strings
+      if (!_tmp.empty()) _result.emplace_back(_tmp);
+    }
+    return _result;
+  };
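+  // Illustrative note (not in the upstream source): tokenize("-c my example",
+  // " \t") yields {"-c", "my", "example"}; runs of delimiters produce no
+  // empty tokens, and leading/trailing delimiters are skipped.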
+  auto vargs = tokenize(args, " \t");
+  if (vargs.size() == 0) return;
+  auto _argc          = static_cast<int>(vargs.size());
+  char** _argv        = new char*[_argc + 1];
+  _argv[vargs.size()] = nullptr;
+  for (int i = 0; i < _argc; ++i) {
+    auto& _str = vargs.at(i);
+    _argv[i]   = new char[_str.length() + 1];
+    std::memcpy(_argv[i], _str.c_str(), _str.length() * sizeof(char));
+    _argv[i][_str.length()] = '\0';
+  }
+  parseArgs(_argc, _argv);
+  for (int i = 0; i < _argc; ++i) {
+    delete[] _argv[i];
+  }
+  delete[] _argv;
+}
+
+SpaceHandle make_space_handle(const char* space_name) {
+  SpaceHandle handle;
+  strncpy(handle.name, space_name, 63);
+  return handle;
+}
+
+template <typename Callback>
+void lookup_function(void* dlopen_handle, const std::string& basename,
+                     Callback& callback) {
+#ifdef KOKKOS_TOOLS_ENABLE_LIBDL
+  // dlsym returns a pointer to an object, while we want to assign it to a
+  // pointer to a function. A direct cast will give warnings; hence, we work
+  // around the issue by casting between pointers to pointers.
+  void* p  = dlsym(dlopen_handle, basename.c_str());
+  callback = *reinterpret_cast<Callback*>(&p);
+#else
+  (void)dlopen_handle;
+  (void)basename;
+  (void)callback;
+#endif
+}
+
+void initialize(const std::string& profileLibrary) {
+  // Make sure initialization happens only once
+  static int is_initialized = 0;
+  if (is_initialized) return;
+  is_initialized = 1;
+
+  auto invoke_init_callbacks = []() {
+    Experimental::invoke_kokkosp_callback(
+        Experimental::MayRequireGlobalFencing::No,
+        Experimental::current_callbacks.init, 0,
+        (uint64_t)KOKKOSP_INTERFACE_VERSION, (uint32_t)0, nullptr);
+
+    Experimental::tool_requirements.requires_global_fencing = true;
+
+    Experimental::invoke_kokkosp_callback(
+        Experimental::MayRequireGlobalFencing::No,
+        Experimental::current_callbacks.request_tool_settings, 1,
+        &Experimental::tool_requirements);
+
+    Experimental::ToolProgrammingInterface actions;
+    actions.fence = &Experimental::Impl::tool_invoked_fence;
+
+    Experimental::invoke_kokkosp_callback(
+        Experimental::MayRequireGlobalFencing::No,
+        Experimental::current_callbacks.provide_tool_programming_interface, 1,
+        actions);
+  };
+
+#ifdef KOKKOS_TOOLS_ENABLE_LIBDL
+  void* firstProfileLibrary = nullptr;
+
+  if ((profileLibrary.empty()) ||
+      (profileLibrary == InitArguments::unset_string_option)) {
+    invoke_init_callbacks();
+    return;
+  }
+
+  char* envProfileLibrary = const_cast<char*>(profileLibrary.c_str());
+
+  const auto envProfileCopy =
+      std::make_unique<char[]>(strlen(envProfileLibrary) + 1);
+  sprintf(envProfileCopy.get(), "%s", envProfileLibrary);
+
+  char* profileLibraryName = strtok(envProfileCopy.get(), ";");
+
+  if ((profileLibraryName != nullptr) &&
+      (strcmp(profileLibraryName, "") != 0)) {
+    firstProfileLibrary = dlopen(profileLibraryName, RTLD_NOW | RTLD_GLOBAL);
+
+    if (firstProfileLibrary == nullptr) {
+      std::cerr << "Error: Unable to load KokkosP library: "
+                << profileLibraryName << std::endl;
+      std::cerr << "dlopen(" << profileLibraryName
+                << ", RTLD_NOW | RTLD_GLOBAL) failed with " << dlerror()
+                << '\n';
+    } else {
+#ifdef KOKKOS_ENABLE_PROFILING_LOAD_PRINT
+      std::cout << "KokkosP: Library Loaded: " << profileLibraryName
+                << std::endl;
+#endif
+      lookup_function(firstProfileLibrary, "kokkosp_begin_parallel_scan",
+                      Experimental::current_callbacks.begin_parallel_scan);
+      lookup_function(firstProfileLibrary, "kokkosp_begin_parallel_for",
+                      Experimental::current_callbacks.begin_parallel_for);
+      lookup_function(firstProfileLibrary, "kokkosp_begin_parallel_reduce",
+                      Experimental::current_callbacks.begin_parallel_reduce);
+      lookup_function(firstProfileLibrary, "kokkosp_end_parallel_scan",
+                      Experimental::current_callbacks.end_parallel_scan);
+      lookup_function(firstProfileLibrary, "kokkosp_end_parallel_for",
+                      Experimental::current_callbacks.end_parallel_for);
+      lookup_function(firstProfileLibrary, "kokkosp_end_parallel_reduce",
+                      Experimental::current_callbacks.end_parallel_reduce);
+
+      lookup_function(firstProfileLibrary, "kokkosp_init_library",
+                      Experimental::current_callbacks.init);
+      lookup_function(firstProfileLibrary, "kokkosp_finalize_library",
+                      Experimental::current_callbacks.finalize);
+
+      lookup_function(firstProfileLibrary, "kokkosp_push_profile_region",
+                      Experimental::current_callbacks.push_region);
+      lookup_function(firstProfileLibrary, "kokkosp_pop_profile_region",
+                      Experimental::current_callbacks.pop_region);
+      lookup_function(firstProfileLibrary, "kokkosp_allocate_data",
+                      Experimental::current_callbacks.allocate_data);
+      lookup_function(firstProfileLibrary, "kokkosp_deallocate_data",
+                      Experimental::current_callbacks.deallocate_data);
+
+      lookup_function(firstProfileLibrary, "kokkosp_begin_deep_copy",
+                      Experimental::current_callbacks.begin_deep_copy);
+      lookup_function(firstProfileLibrary, "kokkosp_end_deep_copy",
+                      Experimental::current_callbacks.end_deep_copy);
+      lookup_function(firstProfileLibrary, "kokkosp_begin_fence",
+                      Experimental::current_callbacks.begin_fence);
+      lookup_function(firstProfileLibrary, "kokkosp_end_fence",
+                      Experimental::current_callbacks.end_fence);
+      lookup_function(firstProfileLibrary, "kokkosp_dual_view_sync",
+                      Experimental::current_callbacks.sync_dual_view);
+      lookup_function(firstProfileLibrary, "kokkosp_dual_view_modify",
+                      Experimental::current_callbacks.modify_dual_view);
+
+      lookup_function(firstProfileLibrary, "kokkosp_declare_metadata",
+                      Experimental::current_callbacks.declare_metadata);
+      lookup_function(firstProfileLibrary, "kokkosp_create_profile_section",
+                      Experimental::current_callbacks.create_profile_section);
+      lookup_function(firstProfileLibrary, "kokkosp_start_profile_section",
+                      Experimental::current_callbacks.start_profile_section);
+      lookup_function(firstProfileLibrary, "kokkosp_stop_profile_section",
+                      Experimental::current_callbacks.stop_profile_section);
+      lookup_function(firstProfileLibrary, "kokkosp_destroy_profile_section",
+                      Experimental::current_callbacks.destroy_profile_section);
+
+      lookup_function(firstProfileLibrary, "kokkosp_profile_event",
+                      Experimental::current_callbacks.profile_event);
+#ifdef KOKKOS_ENABLE_TUNING
+      lookup_function(firstProfileLibrary, "kokkosp_declare_output_type",
+                      Experimental::current_callbacks.declare_output_type);
+
+      lookup_function(firstProfileLibrary, "kokkosp_declare_input_type",
+                      Experimental::current_callbacks.declare_input_type);
+      lookup_function(firstProfileLibrary, "kokkosp_request_values",
+                      Experimental::current_callbacks.request_output_values);
+      lookup_function(firstProfileLibrary, "kokkosp_end_context",
+                      Experimental::current_callbacks.end_tuning_context);
+      lookup_function(firstProfileLibrary, "kokkosp_begin_context",
+                      Experimental::current_callbacks.begin_tuning_context);
+      lookup_function(
+          firstProfileLibrary, "kokkosp_declare_optimization_goal",
+          Experimental::current_callbacks.declare_optimization_goal);
+#endif  // KOKKOS_ENABLE_TUNING
+
+      lookup_function(firstProfileLibrary, "kokkosp_print_help",
+                      Experimental::current_callbacks.print_help);
+      lookup_function(firstProfileLibrary, "kokkosp_parse_args",
+                      Experimental::current_callbacks.parse_args);
+      lookup_function(
+          firstProfileLibrary, "kokkosp_provide_tool_programming_interface",
+          Experimental::current_callbacks.provide_tool_programming_interface);
+      lookup_function(firstProfileLibrary, "kokkosp_request_tool_settings",
+                      Experimental::current_callbacks.request_tool_settings);
+    }
+  }
+#else
+  (void)profileLibrary;
+#endif  // KOKKOS_ENABLE_LIBDL
+
+  invoke_init_callbacks();
+
+#ifdef KOKKOS_ENABLE_TUNING
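+  // Register the two built-in tuning context variables: the kernel name (an
+  // unbounded categorical string) and the kernel type (a categorical string
+  // drawn from the four candidate values below).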
+  Experimental::VariableInfo kernel_name;
+  kernel_name.type = Experimental::ValueType::kokkos_value_string;
+  kernel_name.category =
+      Experimental::StatisticalCategory::kokkos_value_categorical;
+  kernel_name.valueQuantity =
+      Experimental::CandidateValueType::kokkos_value_unbounded;
+
+  std::array<std::string, 4> candidate_values = {
+      "parallel_for",
+      "parallel_reduce",
+      "parallel_scan",
+      "parallel_copy",
+  };
+
+  Experimental::SetOrRange kernel_type_variable_candidates =
+      Experimental::make_candidate_set(candidate_values.size(),
+                                       candidate_values.data());
+
+  Experimental::kernel_name_context_variable_id =
+      Experimental::declare_input_type("kokkos.kernel_name", kernel_name);
+
+  Experimental::VariableInfo kernel_type;
+  kernel_type.type = Experimental::ValueType::kokkos_value_string;
+  kernel_type.category =
+      Experimental::StatisticalCategory::kokkos_value_categorical;
+  kernel_type.valueQuantity =
+      Experimental::CandidateValueType::kokkos_value_set;
+  kernel_type.candidates = kernel_type_variable_candidates;
+  Experimental::kernel_type_context_variable_id =
+      Experimental::declare_input_type("kokkos.kernel_type", kernel_type);
+
+#endif
+
+  Experimental::no_profiling.init     = nullptr;
+  Experimental::no_profiling.finalize = nullptr;
+
+  Experimental::no_profiling.begin_parallel_for    = nullptr;
+  Experimental::no_profiling.begin_parallel_scan   = nullptr;
+  Experimental::no_profiling.begin_parallel_reduce = nullptr;
+  Experimental::no_profiling.end_parallel_scan     = nullptr;
+  Experimental::no_profiling.end_parallel_for      = nullptr;
+  Experimental::no_profiling.end_parallel_reduce   = nullptr;
+
+  Experimental::no_profiling.push_region     = nullptr;
+  Experimental::no_profiling.pop_region      = nullptr;
+  Experimental::no_profiling.allocate_data   = nullptr;
+  Experimental::no_profiling.deallocate_data = nullptr;
+
+  Experimental::no_profiling.begin_deep_copy = nullptr;
+  Experimental::no_profiling.end_deep_copy   = nullptr;
+
+  Experimental::no_profiling.create_profile_section  = nullptr;
+  Experimental::no_profiling.start_profile_section   = nullptr;
+  Experimental::no_profiling.stop_profile_section    = nullptr;
+  Experimental::no_profiling.destroy_profile_section = nullptr;
+
+  Experimental::no_profiling.profile_event = nullptr;
+
+  Experimental::no_profiling.declare_input_type    = nullptr;
+  Experimental::no_profiling.declare_output_type   = nullptr;
+  Experimental::no_profiling.request_output_values = nullptr;
+  Experimental::no_profiling.end_tuning_context    = nullptr;
+}
+
+void finalize() {
+  // Make sure finalize is called only once
+  static int is_finalized = 0;
+  if (is_finalized) return;
+  is_finalized = 1;
+
+  if (Experimental::current_callbacks.finalize != nullptr) {
+    Experimental::invoke_kokkosp_callback(
+        Experimental::MayRequireGlobalFencing::No,
+        Experimental::current_callbacks.finalize);
+
+    Experimental::pause_tools();
+  }
+#ifdef KOKKOS_ENABLE_TUNING
+  // clean up string candidate set
+  for (auto& metadata_pair : Experimental::variable_metadata) {
+    auto metadata = metadata_pair.second;
+    if ((metadata.type == Experimental::ValueType::kokkos_value_string) &&
+        (metadata.valueQuantity ==
+         Experimental::CandidateValueType::kokkos_value_set)) {
+      auto candidate_set = metadata.candidates.set;
+      delete[] candidate_set.values.string_value;
+    }
+  }
+#endif
+}
+
+void syncDualView(const std::string& label, const void* const ptr,
+                  bool to_device) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.sync_dual_view, label.c_str(), ptr,
+      to_device);
+}
+void modifyDualView(const std::string& label, const void* const ptr,
+                    bool on_device) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.modify_dual_view, label.c_str(), ptr,
+      on_device);
+}
+
+void declareMetadata(const std::string& key, const std::string& value) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.declare_metadata, key.c_str(),
+      value.c_str());
+}
+
+}  // namespace Tools
+
+namespace Tools {
+namespace Experimental {
+void set_init_callback(initFunction callback) {
+  current_callbacks.init = callback;
+}
+void set_finalize_callback(finalizeFunction callback) {
+  current_callbacks.finalize = callback;
+}
+void set_parse_args_callback(parseArgsFunction callback) {
+  current_callbacks.parse_args = callback;
+}
+void set_print_help_callback(printHelpFunction callback) {
+  current_callbacks.print_help = callback;
+}
+void set_begin_parallel_for_callback(beginFunction callback) {
+  current_callbacks.begin_parallel_for = callback;
+}
+void set_end_parallel_for_callback(endFunction callback) {
+  current_callbacks.end_parallel_for = callback;
+}
+void set_begin_parallel_reduce_callback(beginFunction callback) {
+  current_callbacks.begin_parallel_reduce = callback;
+}
+void set_end_parallel_reduce_callback(endFunction callback) {
+  current_callbacks.end_parallel_reduce = callback;
+}
+void set_begin_parallel_scan_callback(beginFunction callback) {
+  current_callbacks.begin_parallel_scan = callback;
+}
+void set_end_parallel_scan_callback(endFunction callback) {
+  current_callbacks.end_parallel_scan = callback;
+}
+void set_push_region_callback(pushFunction callback) {
+  current_callbacks.push_region = callback;
+}
+void set_pop_region_callback(popFunction callback) {
+  current_callbacks.pop_region = callback;
+}
+void set_allocate_data_callback(allocateDataFunction callback) {
+  current_callbacks.allocate_data = callback;
+}
+void set_deallocate_data_callback(deallocateDataFunction callback) {
+  current_callbacks.deallocate_data = callback;
+}
+void set_create_profile_section_callback(
+    createProfileSectionFunction callback) {
+  current_callbacks.create_profile_section = callback;
+}
+void set_start_profile_section_callback(startProfileSectionFunction callback) {
+  current_callbacks.start_profile_section = callback;
+}
+void set_stop_profile_section_callback(stopProfileSectionFunction callback) {
+  current_callbacks.stop_profile_section = callback;
+}
+void set_destroy_profile_section_callback(
+    destroyProfileSectionFunction callback) {
+  current_callbacks.destroy_profile_section = callback;
+}
+void set_profile_event_callback(profileEventFunction callback) {
+  current_callbacks.profile_event = callback;
+}
+void set_begin_deep_copy_callback(beginDeepCopyFunction callback) {
+  current_callbacks.begin_deep_copy = callback;
+}
+void set_end_deep_copy_callback(endDeepCopyFunction callback) {
+  current_callbacks.end_deep_copy = callback;
+}
+void set_begin_fence_callback(beginFenceFunction callback) {
+  current_callbacks.begin_fence = callback;
+}
+void set_end_fence_callback(endFenceFunction callback) {
+  current_callbacks.end_fence = callback;
+}
+
+void set_dual_view_sync_callback(dualViewSyncFunction callback) {
+  current_callbacks.sync_dual_view = callback;
+}
+void set_dual_view_modify_callback(dualViewModifyFunction callback) {
+  current_callbacks.modify_dual_view = callback;
+}
+void set_declare_metadata_callback(declareMetadataFunction callback) {
+  current_callbacks.declare_metadata = callback;
+}
+void set_request_tool_settings_callback(requestToolSettingsFunction callback) {
+  current_callbacks.request_tool_settings = callback;
+}
+void set_provide_tool_programming_interface_callback(
+    provideToolProgrammingInterfaceFunction callback) {
+  current_callbacks.provide_tool_programming_interface = callback;
+}
+
+void set_declare_output_type_callback(
+    Experimental::outputTypeDeclarationFunction callback) {
+  current_callbacks.declare_output_type = callback;
+}
+void set_declare_input_type_callback(
+    Experimental::inputTypeDeclarationFunction callback) {
+  current_callbacks.declare_input_type = callback;
+}
+void set_request_output_values_callback(
+    Experimental::requestValueFunction callback) {
+  current_callbacks.request_output_values = callback;
+}
+void set_end_context_callback(Experimental::contextEndFunction callback) {
+  current_callbacks.end_tuning_context = callback;
+}
+void set_begin_context_callback(Experimental::contextBeginFunction callback) {
+  current_callbacks.begin_tuning_context = callback;
+}
+void set_declare_optimization_goal_callback(
+    Experimental::optimizationGoalDeclarationFunction callback) {
+  current_callbacks.declare_optimization_goal = callback;
+}
+
+void pause_tools() {
+  backup_callbacks  = current_callbacks;
+  current_callbacks = no_profiling;
+}
+
+void resume_tools() { current_callbacks = backup_callbacks; }
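+// Usage sketch: temporarily silence all tool callbacks around a region that
+// should not be observed,
+//   pause_tools();
+//   do_unobserved_work();  // hypothetical user code
+//   resume_tools();
+// Note there is a single backup slot, so pause/resume pairs do not nest.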
+
+Kokkos::Tools::Experimental::EventSet get_callbacks() {
+  return current_callbacks;
+}
+void set_callbacks(Kokkos::Tools::Experimental::EventSet new_events) {
+  current_callbacks = new_events;
+}
+}  // namespace Experimental
+}  // namespace Tools
+
+namespace Profiling {
+bool profileLibraryLoaded() { return Kokkos::Tools::profileLibraryLoaded(); }
+
+void beginParallelFor(const std::string& kernelPrefix, const uint32_t devID,
+                      uint64_t* kernelID) {
+  Kokkos::Tools::beginParallelFor(kernelPrefix, devID, kernelID);
+}
+void beginParallelReduce(const std::string& kernelPrefix, const uint32_t devID,
+                         uint64_t* kernelID) {
+  Kokkos::Tools::beginParallelReduce(kernelPrefix, devID, kernelID);
+}
+void beginParallelScan(const std::string& kernelPrefix, const uint32_t devID,
+                       uint64_t* kernelID) {
+  Kokkos::Tools::beginParallelScan(kernelPrefix, devID, kernelID);
+}
+void endParallelFor(const uint64_t kernelID) {
+  Kokkos::Tools::endParallelFor(kernelID);
+}
+void endParallelReduce(const uint64_t kernelID) {
+  Kokkos::Tools::endParallelReduce(kernelID);
+}
+void endParallelScan(const uint64_t kernelID) {
+  Kokkos::Tools::endParallelScan(kernelID);
+}
+
+void pushRegion(const std::string& kName) { Kokkos::Tools::pushRegion(kName); }
+void popRegion() { Kokkos::Tools::popRegion(); }
+
+void createProfileSection(const std::string& sectionName, uint32_t* secID) {
+  Kokkos::Tools::createProfileSection(sectionName, secID);
+}
+void destroyProfileSection(const uint32_t secID) {
+  Kokkos::Tools::destroyProfileSection(secID);
+}
+
+void startSection(const uint32_t secID) { Kokkos::Tools::startSection(secID); }
+
+void stopSection(const uint32_t secID) { Kokkos::Tools::stopSection(secID); }
+
+void markEvent(const std::string& eventName) {
+  Kokkos::Tools::markEvent(eventName);
+}
+void allocateData(const SpaceHandle handle, const std::string name,
+                  const void* data, const uint64_t size) {
+  Kokkos::Tools::allocateData(handle, name, data, size);
+}
+void deallocateData(const SpaceHandle space, const std::string label,
+                    const void* ptr, const uint64_t size) {
+  Kokkos::Tools::deallocateData(space, label, ptr, size);
+}
+
+void beginDeepCopy(const SpaceHandle dst_space, const std::string dst_label,
+                   const void* dst_ptr, const SpaceHandle src_space,
+                   const std::string src_label, const void* src_ptr,
+                   const uint64_t size) {
+  Kokkos::Tools::beginDeepCopy(dst_space, dst_label, dst_ptr, src_space,
+                               src_label, src_ptr, size);
+}
+void endDeepCopy() { Kokkos::Tools::endDeepCopy(); }
+
+void finalize() { Kokkos::Tools::finalize(); }
+void initialize(const std::string& profileLibrary) {
+  Kokkos::Tools::initialize(profileLibrary);
+}
+
+bool printHelp(const std::string& args) {
+  return Kokkos::Tools::printHelp(args);
+}
+void parseArgs(const std::string& args) { Kokkos::Tools::parseArgs(args); }
+void parseArgs(int _argc, char** _argv) {
+  Kokkos::Tools::parseArgs(_argc, _argv);
+}
+
+SpaceHandle make_space_handle(const char* space_name) {
+  return Kokkos::Tools::make_space_handle(space_name);
+}
+}  // namespace Profiling
+
+// Tuning
+
+namespace Tools {
+namespace Experimental {
+static size_t& get_context_counter() {
+  static size_t x;
+  return x;
+}
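+// Unlike get_context_counter(), this counter advances on every access, so
+// each call to get_new_variable_id() below returns a fresh id.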
+static size_t& get_variable_counter() {
+  static size_t x;
+  return ++x;
+}
+
+size_t get_new_context_id() { return ++get_context_counter(); }
+size_t get_current_context_id() { return get_context_counter(); }
+void decrement_current_context_id() { --get_context_counter(); }
+size_t get_new_variable_id() { return get_variable_counter(); }
+
+size_t declare_output_type(const std::string& variableName, VariableInfo info) {
+  size_t variableId = get_new_variable_id();
+#ifdef KOKKOS_ENABLE_TUNING
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.declare_output_type, variableName.c_str(),
+      variableId, &info);
+  variable_metadata[variableId] = info;
+#else
+  (void)variableName;
+  (void)info;
+#endif
+  return variableId;
+}
+
+size_t declare_input_type(const std::string& variableName, VariableInfo info) {
+  size_t variableId = get_new_variable_id();
+#ifdef KOKKOS_ENABLE_TUNING
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.declare_input_type, variableName.c_str(),
+      variableId, &info);
+  variable_metadata[variableId] = info;
+#else
+  (void)variableName;
+  (void)info;
+#endif
+  return variableId;
+}
+
+void set_input_values(size_t contextId, size_t count, VariableValue* values) {
+#ifdef KOKKOS_ENABLE_TUNING
+  if (features_per_context.find(contextId) == features_per_context.end()) {
+    features_per_context[contextId] = std::unordered_set<size_t>();
+  }
+  for (size_t x = 0; x < count; ++x) {
+    values[x].metadata = &variable_metadata[values[x].type_id];
+    features_per_context[contextId].insert(values[x].type_id);
+    active_features.insert(values[x].type_id);
+    feature_values[values[x].type_id] = values[x];
+  }
+#else
+  (void)contextId;
+  (void)count;
+  (void)values;
+#endif
+}
+void request_output_values(size_t contextId, size_t count,
+                           VariableValue* values) {
+#ifdef KOKKOS_ENABLE_TUNING
+  std::vector<VariableValue> context_values;
+  for (auto id : active_features) {
+    context_values.push_back(feature_values[id]);
+  }
+  if (Experimental::current_callbacks.request_output_values != nullptr) {
+    for (size_t x = 0; x < count; ++x) {
+      values[x].metadata = &variable_metadata[values[x].type_id];
+    }
+    Experimental::invoke_kokkosp_callback(
+        Experimental::MayRequireGlobalFencing::No,
+        Experimental::current_callbacks.request_output_values, contextId,
+        context_values.size(), context_values.data(), count, values);
+  }
+#else
+  (void)contextId;
+  (void)count;
+  (void)values;
+#endif
+}
+
+#ifdef KOKKOS_ENABLE_TUNING
+static std::unordered_map<size_t, size_t> optimization_goals;
+#endif
+
+void begin_context(size_t contextId) {
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.begin_tuning_context, contextId);
+}
+void end_context(size_t contextId) {
+#ifdef KOKKOS_ENABLE_TUNING
+  for (auto id : features_per_context[contextId]) {
+    active_features.erase(id);
+  }
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.end_tuning_context, contextId,
+      feature_values[optimization_goals[contextId]]);
+  optimization_goals.erase(contextId);
+  decrement_current_context_id();
+#else
+  (void)contextId;
+#endif
+}
+
+bool have_tuning_tool() {
+#ifdef KOKKOS_ENABLE_TUNING
+  return (Experimental::current_callbacks.request_output_values != nullptr);
+#else
+  return false;
+#endif
+}
+
+VariableValue make_variable_value(size_t id, int64_t val) {
+  VariableValue variable_value;
+  variable_value.type_id         = id;
+  variable_value.value.int_value = val;
+  return variable_value;
+}
+VariableValue make_variable_value(size_t id, double val) {
+  VariableValue variable_value;
+  variable_value.type_id            = id;
+  variable_value.value.double_value = val;
+  return variable_value;
+}
+VariableValue make_variable_value(size_t id, const std::string& val) {
+  VariableValue variable_value;
+  variable_value.type_id = id;
+  strncpy(variable_value.value.string_value, val.c_str(),
+          KOKKOS_TOOLS_TUNING_STRING_LENGTH - 1);
+  // strncpy does not null-terminate when val fills the buffer; do it here.
+  variable_value.value.string_value[KOKKOS_TOOLS_TUNING_STRING_LENGTH - 1] =
+      '\0';
+  return variable_value;
+}
+SetOrRange make_candidate_set(size_t size, std::string* data) {
+  SetOrRange value_set;
+  value_set.set.values.string_value = new TuningString[size];
+  for (size_t x = 0; x < size; ++x) {
+    strncpy(value_set.set.values.string_value[x], data[x].c_str(),
+            KOKKOS_TOOLS_TUNING_STRING_LENGTH - 1);
+    // Guarantee null termination of over-long candidate strings.
+    value_set.set.values.string_value[x][KOKKOS_TOOLS_TUNING_STRING_LENGTH -
+                                         1] = '\0';
+  }
+  value_set.set.size = size;
+  return value_set;
+}
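+// Note: the TuningString array allocated above is released in
+// Kokkos::Tools::finalize(), which deletes the candidate sets of all
+// string-typed set variables.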
+SetOrRange make_candidate_set(size_t size, int64_t* data) {
+  SetOrRange value_set;
+  value_set.set.size             = size;
+  value_set.set.values.int_value = data;
+  return value_set;
+}
+SetOrRange make_candidate_set(size_t size, double* data) {
+  SetOrRange value_set;
+  value_set.set.size                = size;
+  value_set.set.values.double_value = data;
+  return value_set;
+}
+SetOrRange make_candidate_range(double lower, double upper, double step,
+                                bool openLower = false,
+                                bool openUpper = false) {
+  SetOrRange value_range;
+  value_range.range.lower.double_value = lower;
+  value_range.range.upper.double_value = upper;
+  value_range.range.step.double_value  = step;
+  value_range.range.openLower          = openLower;
+  value_range.range.openUpper          = openUpper;
+  return value_range;
+}
+
+SetOrRange make_candidate_range(int64_t lower, int64_t upper, int64_t step,
+                                bool openLower = false,
+                                bool openUpper = false) {
+  SetOrRange value_range;
+  value_range.range.lower.int_value = lower;
+  value_range.range.upper.int_value = upper;
+  value_range.range.step.int_value  = step;
+  value_range.range.openLower       = openLower;
+  value_range.range.openUpper       = openUpper;
+  return value_range;
+}
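+// Example (a sketch; assuming an "open" endpoint is excluded, as in the
+// "[2,12)" notation used by the C interface): the integer range [32, 1024)
+// in steps of 32 would be
+//   make_candidate_range(int64_t{32}, int64_t{1024}, int64_t{32},
+//                        /*openLower=*/false, /*openUpper=*/true);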
+
+void declare_optimization_goal(const size_t context,
+                               const OptimizationGoal& goal) {
+#ifdef KOKKOS_ENABLE_TUNING
+  Experimental::invoke_kokkosp_callback(
+      Experimental::MayRequireGlobalFencing::No,
+      Experimental::current_callbacks.declare_optimization_goal, context, goal);
+  optimization_goals[context] = goal.type_id;
+#else
+  (void)context;
+  (void)goal;
+#endif
+}
+}  // end namespace Experimental
+}  // end namespace Tools
+
+}  // end namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling.hpp
new file mode 100644 (file)
index 0000000..cb17a0c
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_KOKKOS_PROFILING_HPP
+#define KOKKOS_IMPL_KOKKOS_PROFILING_HPP
+
+#include <impl/Kokkos_Profiling_Interface.hpp>
+#include <memory>
+#include <iosfwd>
+#include <unordered_map>
+#include <map>
+#include <string>
+#include <type_traits>
+#include <mutex>
+namespace Kokkos {
+
+// forward declaration
+bool show_warnings() noexcept;
+bool tune_internals() noexcept;
+
+namespace Tools {
+
+struct InitArguments {
+  // NOTE DZP: PossiblyUnsetOption was introduced before C++17;
+  // std::optional is the better long-term choice here.
+  static const std::string unset_string_option;
+  enum PossiblyUnsetOption { unset, off, on };
+  PossiblyUnsetOption help = unset;
+  std::string lib          = unset_string_option;
+  std::string args         = unset_string_option;
+};
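+// Typical contents (a sketch): `lib` holds the path to a tools shared
+// library (e.g. taken from the KOKKOS_PROFILE_LIBRARY environment variable)
+// and `args` the argument string handed to the tool's parse_args callback.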
+
+namespace Impl {
+
+struct InitializationStatus {
+  enum InitializationResult {
+    success,
+    failure,
+    help_request,
+    environment_argument_mismatch
+  };
+  InitializationResult result;
+  std::string error_message;
+};
+InitializationStatus initialize_tools_subsystem(
+    const Kokkos::Tools::InitArguments& args);
+
+void parse_command_line_arguments(int& narg, char* arg[],
+                                  InitArguments& arguments);
+Kokkos::Tools::Impl::InitializationStatus parse_environment_variables(
+    InitArguments& arguments);
+
+}  // namespace Impl
+
+bool profileLibraryLoaded();
+
+void beginParallelFor(const std::string& kernelPrefix, const uint32_t devID,
+                      uint64_t* kernelID);
+void endParallelFor(const uint64_t kernelID);
+void beginParallelScan(const std::string& kernelPrefix, const uint32_t devID,
+                       uint64_t* kernelID);
+void endParallelScan(const uint64_t kernelID);
+void beginParallelReduce(const std::string& kernelPrefix, const uint32_t devID,
+                         uint64_t* kernelID);
+void endParallelReduce(const uint64_t kernelID);
+
+void pushRegion(const std::string& kName);
+void popRegion();
+
+void createProfileSection(const std::string& sectionName, uint32_t* secID);
+void startSection(const uint32_t secID);
+void stopSection(const uint32_t secID);
+void destroyProfileSection(const uint32_t secID);
+
+void markEvent(const std::string& evName);
+
+void allocateData(const SpaceHandle space, const std::string label,
+                  const void* ptr, const uint64_t size);
+void deallocateData(const SpaceHandle space, const std::string label,
+                    const void* ptr, const uint64_t size);
+
+void beginDeepCopy(const SpaceHandle dst_space, const std::string dst_label,
+                   const void* dst_ptr, const SpaceHandle src_space,
+                   const std::string src_label, const void* src_ptr,
+                   const uint64_t size);
+void endDeepCopy();
+void beginFence(const std::string name, const uint32_t deviceId,
+                uint64_t* handle);
+void endFence(const uint64_t handle);
+
+/**
+ * syncDualView declares to the tool that a given DualView
+ * has been synced.
+ *
+ * Arguments:
+ *
+ * label:     name of the View within the DualView
+ * ptr:       that View's data ptr
+ * to_device: true if the data is being synchronized to the device
+ *             false otherwise
+ */
+void syncDualView(const std::string& label, const void* const ptr,
+                  bool to_device);
+/**
+ * modifyDualView declares to the tool that a given DualView
+ * has been marked modified. Note: this means that somebody *called*
+ * modify() on the DualView; it is not invoked every time the
+ * underlying data is touched.
+ *
+ * Arguments:
+ *
+ * label:     name of the View within the DualView
+ * ptr:       that View's data ptr
+ * on_device: true if the data is being modified on the device
+ *             false otherwise
+ */
+void modifyDualView(const std::string& label, const void* const ptr,
+                    bool on_device);
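+// Sketch of how these hooks are typically driven (names are illustrative,
+// not the actual DualView internals):
+//   Kokkos::Tools::modifyDualView(label, host_ptr, /*on_device=*/false);
+//   Kokkos::Tools::syncDualView(label, device_ptr, /*to_device=*/true);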
+
+void declareMetadata(const std::string& key, const std::string& value);
+void initialize(
+    const std::string& = {});  // should rename to impl_initialize ASAP
+void initialize(const Kokkos::Tools::InitArguments&);
+void initialize(int argc, char* argv[]);
+void finalize();
+bool printHelp(const std::string&);
+void parseArgs(const std::string&);
+
+Kokkos_Profiling_SpaceHandle make_space_handle(const char* space_name);
+
+namespace Experimental {
+
+namespace Impl {
+struct DirectFenceIDHandle {
+  uint32_t value;
+};
+template <typename Space>
+uint32_t idForInstance(const uintptr_t instance) {
+  static std::mutex instance_mutex;
+  const std::lock_guard<std::mutex> lock(instance_mutex);
+  /** The map is held through a shared_ptr to avoid static initialization
+   *  order problems. */
+  using map_type = std::map<uintptr_t, uint32_t>;
+
+  static std::shared_ptr<map_type> map;
+  if (map.get() == nullptr) {
+    map = std::make_shared<map_type>(map_type());
+  }
+
+  static uint32_t value = 0;
+  constexpr const uint32_t offset =
+      Kokkos::Tools::Experimental::NumReservedDeviceIDs;
+
+  auto find = map->find(instance);
+  if (find == map->end()) {
+    auto ret         = offset + value++;
+    (*map)[instance] = ret;
+    return ret;
+  }
+
+  return find->second;
+}
+
+template <typename Space, typename FencingFunctor>
+void profile_fence_event(const std::string& name, DirectFenceIDHandle devIDTag,
+                         const FencingFunctor& func) {
+  uint64_t handle = 0;
+  Kokkos::Tools::beginFence(
+      name,
+      Kokkos::Tools::Experimental::device_id_root<Space>() + devIDTag.value,
+      &handle);
+  func();
+  Kokkos::Tools::endFence(handle);
+}
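+// Usage sketch (illustrative, not an existing call site):
+//   profile_fence_event<SomeExecSpace>("named fence", DirectFenceIDHandle{1},
+//                                      [&] { /* fence the instance */ });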
+
+inline uint32_t int_for_synchronization_reason(
+    Kokkos::Tools::Experimental::SpecialSynchronizationCases reason) {
+  switch (reason) {
+    case GlobalDeviceSynchronization: return 0;
+    case DeepCopyResourceSynchronization: return 0x00ffffff;
+  }
+  return 0;
+}
+
+template <typename Space, typename FencingFunctor>
+void profile_fence_event(
+    const std::string& name,
+    Kokkos::Tools::Experimental::SpecialSynchronizationCases reason,
+    const FencingFunctor& func) {
+  uint64_t handle = 0;
+  Kokkos::Tools::beginFence(
+      name, device_id_root<Space>() + int_for_synchronization_reason(reason),
+      &handle);  // TODO: correct ID
+  func();
+  Kokkos::Tools::endFence(handle);
+}
+}  // namespace Impl
+void set_init_callback(initFunction callback);
+void set_finalize_callback(finalizeFunction callback);
+void set_parse_args_callback(parseArgsFunction callback);
+void set_print_help_callback(printHelpFunction callback);
+void set_begin_parallel_for_callback(beginFunction callback);
+void set_end_parallel_for_callback(endFunction callback);
+void set_begin_parallel_reduce_callback(beginFunction callback);
+void set_end_parallel_reduce_callback(endFunction callback);
+void set_begin_parallel_scan_callback(beginFunction callback);
+void set_end_parallel_scan_callback(endFunction callback);
+void set_push_region_callback(pushFunction callback);
+void set_pop_region_callback(popFunction callback);
+void set_allocate_data_callback(allocateDataFunction callback);
+void set_deallocate_data_callback(deallocateDataFunction callback);
+void set_create_profile_section_callback(createProfileSectionFunction callback);
+void set_start_profile_section_callback(startProfileSectionFunction callback);
+void set_stop_profile_section_callback(stopProfileSectionFunction callback);
+void set_destroy_profile_section_callback(
+    destroyProfileSectionFunction callback);
+void set_profile_event_callback(profileEventFunction callback);
+void set_begin_deep_copy_callback(beginDeepCopyFunction callback);
+void set_end_deep_copy_callback(endDeepCopyFunction callback);
+void set_begin_fence_callback(beginFenceFunction callback);
+void set_end_fence_callback(endFenceFunction callback);
+void set_dual_view_sync_callback(dualViewSyncFunction callback);
+void set_dual_view_modify_callback(dualViewModifyFunction callback);
+void set_declare_metadata_callback(declareMetadataFunction callback);
+void set_request_tool_settings_callback(requestToolSettingsFunction callback);
+void set_provide_tool_programming_interface_callback(
+    provideToolProgrammingInterfaceFunction callback);
+void set_declare_output_type_callback(outputTypeDeclarationFunction callback);
+void set_declare_input_type_callback(inputTypeDeclarationFunction callback);
+void set_request_output_values_callback(requestValueFunction callback);
+void set_declare_optimization_goal_callback(
+    optimizationGoalDeclarationFunction callback);
+void set_end_context_callback(contextEndFunction callback);
+void set_begin_context_callback(contextBeginFunction callback);
+
+void pause_tools();
+void resume_tools();
+
+EventSet get_callbacks();
+void set_callbacks(EventSet new_events);
+}  // namespace Experimental
+
+namespace Experimental {
+// forward declarations
+size_t get_new_context_id();
+size_t get_current_context_id();
+}  // namespace Experimental
+
+}  // namespace Tools
+namespace Profiling {
+
+bool profileLibraryLoaded();
+
+void beginParallelFor(const std::string& kernelPrefix, const uint32_t devID,
+                      uint64_t* kernelID);
+void beginParallelReduce(const std::string& kernelPrefix, const uint32_t devID,
+                         uint64_t* kernelID);
+void beginParallelScan(const std::string& kernelPrefix, const uint32_t devID,
+                       uint64_t* kernelID);
+void endParallelFor(const uint64_t kernelID);
+void endParallelReduce(const uint64_t kernelID);
+void endParallelScan(const uint64_t kernelID);
+void pushRegion(const std::string& kName);
+void popRegion();
+
+void createProfileSection(const std::string& sectionName, uint32_t* secID);
+void destroyProfileSection(const uint32_t secID);
+void startSection(const uint32_t secID);
+
+void stopSection(const uint32_t secID);
+
+void markEvent(const std::string& eventName);
+void allocateData(const SpaceHandle handle, const std::string name,
+                  const void* data, const uint64_t size);
+void deallocateData(const SpaceHandle space, const std::string label,
+                    const void* ptr, const uint64_t size);
+void beginDeepCopy(const SpaceHandle dst_space, const std::string dst_label,
+                   const void* dst_ptr, const SpaceHandle src_space,
+                   const std::string src_label, const void* src_ptr,
+                   const uint64_t size);
+void endDeepCopy();
+void finalize();
+void initialize(const std::string& = {});
+
+SpaceHandle make_space_handle(const char* space_name);
+
+namespace Experimental {
+using Kokkos::Tools::Experimental::set_allocate_data_callback;
+using Kokkos::Tools::Experimental::set_begin_deep_copy_callback;
+using Kokkos::Tools::Experimental::set_begin_parallel_for_callback;
+using Kokkos::Tools::Experimental::set_begin_parallel_reduce_callback;
+using Kokkos::Tools::Experimental::set_begin_parallel_scan_callback;
+using Kokkos::Tools::Experimental::set_create_profile_section_callback;
+using Kokkos::Tools::Experimental::set_deallocate_data_callback;
+using Kokkos::Tools::Experimental::set_destroy_profile_section_callback;
+using Kokkos::Tools::Experimental::set_end_deep_copy_callback;
+using Kokkos::Tools::Experimental::set_end_parallel_for_callback;
+using Kokkos::Tools::Experimental::set_end_parallel_reduce_callback;
+using Kokkos::Tools::Experimental::set_end_parallel_scan_callback;
+using Kokkos::Tools::Experimental::set_finalize_callback;
+using Kokkos::Tools::Experimental::set_init_callback;
+using Kokkos::Tools::Experimental::set_parse_args_callback;
+using Kokkos::Tools::Experimental::set_pop_region_callback;
+using Kokkos::Tools::Experimental::set_print_help_callback;
+using Kokkos::Tools::Experimental::set_profile_event_callback;
+using Kokkos::Tools::Experimental::set_push_region_callback;
+using Kokkos::Tools::Experimental::set_start_profile_section_callback;
+using Kokkos::Tools::Experimental::set_stop_profile_section_callback;
+
+using Kokkos::Tools::Experimental::EventSet;
+
+using Kokkos::Tools::Experimental::pause_tools;
+using Kokkos::Tools::Experimental::resume_tools;
+
+using Kokkos::Tools::Experimental::get_callbacks;
+using Kokkos::Tools::Experimental::set_callbacks;
+
+}  // namespace Experimental
+}  // namespace Profiling
+
+namespace Tools {
+namespace Experimental {
+
+VariableValue make_variable_value(size_t id, int64_t val);
+VariableValue make_variable_value(size_t id, double val);
+VariableValue make_variable_value(size_t id, const std::string& val);
+
+SetOrRange make_candidate_set(size_t size, std::string* data);
+SetOrRange make_candidate_set(size_t size, int64_t* data);
+SetOrRange make_candidate_set(size_t size, double* data);
+SetOrRange make_candidate_range(double lower, double upper, double step,
+                                bool openLower, bool openUpper);
+
+SetOrRange make_candidate_range(int64_t lower, int64_t upper, int64_t step,
+                                bool openLower, bool openUpper);
+
+void declare_optimization_goal(const size_t context,
+                               const OptimizationGoal& goal);
+
+size_t declare_output_type(const std::string& typeName, VariableInfo info);
+
+size_t declare_input_type(const std::string& typeName, VariableInfo info);
+
+void set_input_values(size_t contextId, size_t count, VariableValue* values);
+
+void end_context(size_t contextId);
+void begin_context(size_t contextId);
+
+void request_output_values(size_t contextId, size_t count,
+                           VariableValue* values);
+
+bool have_tuning_tool();
+
+size_t get_new_context_id();
+size_t get_current_context_id();
+
+size_t get_new_variable_id();
+}  // namespace Experimental
+}  // namespace Tools
+
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling_C_Interface.h b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling_C_Interface.h
new file mode 100644 (file)
index 0000000..a069fb8
--- /dev/null
@@ -0,0 +1,296 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_PROFILING_C_INTERFACE_HPP
+#define KOKKOS_PROFILING_C_INTERFACE_HPP
+
+#ifdef __cplusplus
+#include <cstddef>
+#include <cstdint>
+#else
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+#endif
+
+#define KOKKOSP_INTERFACE_VERSION 20211015
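+// The interface version encodes a date, YYYYMMDD (here: 2021-10-15).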
+
+// Profiling
+
+struct Kokkos_Profiling_KokkosPDeviceInfo {
+  size_t deviceID;
+};
+
+struct Kokkos_Profiling_SpaceHandle {
+  char name[64];
+};
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_initFunction)(
+    const int, const uint64_t, const uint32_t,
+    struct Kokkos_Profiling_KokkosPDeviceInfo*);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_finalizeFunction)();
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_parseArgsFunction)(int, char**);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_printHelpFunction)(char*);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_beginFunction)(const char*, const uint32_t,
+                                               uint64_t*);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_endFunction)(uint64_t);
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_pushFunction)(const char*);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_popFunction)();
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_allocateDataFunction)(
+    const struct Kokkos_Profiling_SpaceHandle, const char*, const void*,
+    const uint64_t);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_deallocateDataFunction)(
+    const struct Kokkos_Profiling_SpaceHandle, const char*, const void*,
+    const uint64_t);
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_createProfileSectionFunction)(const char*,
+                                                              uint32_t*);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_startProfileSectionFunction)(const uint32_t);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_stopProfileSectionFunction)(const uint32_t);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_destroyProfileSectionFunction)(const uint32_t);
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_profileEventFunction)(const char*);
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_beginDeepCopyFunction)(
+    struct Kokkos_Profiling_SpaceHandle, const char*, const void*,
+    struct Kokkos_Profiling_SpaceHandle, const char*, const void*, uint64_t);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_endDeepCopyFunction)();
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_beginFenceFunction)(const char*, const uint32_t,
+                                                    uint64_t*);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_endFenceFunction)(uint64_t);
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_dualViewSyncFunction)(const char*,
+                                                      const void* const, bool);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_dualViewModifyFunction)(const char*,
+                                                        const void* const,
+                                                        bool);
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Profiling_declareMetadataFunction)(const char*,
+                                                         const char*);
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Tools_toolInvokedFenceFunction)(const uint32_t);
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Tools_functionPointer)();
+struct Kokkos_Tools_ToolProgrammingInterface {
+  Kokkos_Tools_toolInvokedFenceFunction fence;
+  // allow addition of more actions
+  Kokkos_Tools_functionPointer padding[31];
+};
+
+struct Kokkos_Tools_ToolSettings {
+  bool requires_global_fencing;
+  bool padding[255];
+};
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Tools_provideToolProgrammingInterfaceFunction)(
+    const uint32_t, struct Kokkos_Tools_ToolProgrammingInterface);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Tools_requestToolSettingsFunction)(
+    const uint32_t, struct Kokkos_Tools_ToolSettings*);
+
+// Tuning
+
+#define KOKKOS_TOOLS_TUNING_STRING_LENGTH 64
+typedef char Kokkos_Tools_Tuning_String[KOKKOS_TOOLS_TUNING_STRING_LENGTH];
+union Kokkos_Tools_VariableValue_ValueUnion {
+  int64_t int_value;
+  double double_value;
+  Kokkos_Tools_Tuning_String string_value;
+};
+
+union Kokkos_Tools_VariableValue_ValueUnionSet {
+  int64_t* int_value;
+  double* double_value;
+  Kokkos_Tools_Tuning_String* string_value;
+};
+
+struct Kokkos_Tools_ValueSet {
+  size_t size;
+  union Kokkos_Tools_VariableValue_ValueUnionSet values;
+};
+
+enum Kokkos_Tools_OptimizationType {
+  Kokkos_Tools_Minimize,
+  Kokkos_Tools_Maximize
+};
+
+struct Kokkos_Tools_OptimzationGoal {
+  size_t type_id;
+  enum Kokkos_Tools_OptimizationType goal;
+};
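+// Note: "Optimzation" [sic] is kept as-is; the C interface exposes this
+// exact type name.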
+
+struct Kokkos_Tools_ValueRange {
+  union Kokkos_Tools_VariableValue_ValueUnion lower;
+  union Kokkos_Tools_VariableValue_ValueUnion upper;
+  union Kokkos_Tools_VariableValue_ValueUnion step;
+  bool openLower;
+  bool openUpper;
+};
+
+enum Kokkos_Tools_VariableInfo_ValueType {
+  kokkos_value_double,
+  kokkos_value_int64,
+  kokkos_value_string,
+};
+
+enum Kokkos_Tools_VariableInfo_StatisticalCategory {
+  kokkos_value_categorical,  // unordered distinct objects
+  kokkos_value_ordinal,      // ordered distinct objects
+  kokkos_value_interval,  // ordered distinct objects for which distance matters
+  kokkos_value_ratio  // ordered distinct objects for which distance matters,
+                      // division matters, and the concept of zero exists
+};
+
+enum Kokkos_Tools_VariableInfo_CandidateValueType {
+  kokkos_value_set,       // I am one of [2,3,4,5]
+  kokkos_value_range,     // I am somewhere in [2,12)
+  kokkos_value_unbounded  // I am [text/int/float], but we don't know at
+                          // declaration time what values are appropriate. Only
+                          // valid for Context Variables
+};
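+// Example (a sketch): a string variable restricted to a fixed candidate set,
+// using the Kokkos_Tools_VariableInfo struct defined below, would be
+// described as
+//   info.type          = kokkos_value_string;
+//   info.category      = kokkos_value_categorical;
+//   info.valueQuantity = kokkos_value_set;
+// with info.candidates holding the Kokkos_Tools_ValueSet of allowed strings
+// (this mirrors how Kokkos registers its "kokkos.kernel_type" variable).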
+
+union Kokkos_Tools_VariableInfo_SetOrRange {
+  struct Kokkos_Tools_ValueSet set;
+  struct Kokkos_Tools_ValueRange range;
+};
+
+struct Kokkos_Tools_VariableInfo {
+  enum Kokkos_Tools_VariableInfo_ValueType type;
+  enum Kokkos_Tools_VariableInfo_StatisticalCategory category;
+  enum Kokkos_Tools_VariableInfo_CandidateValueType valueQuantity;
+  union Kokkos_Tools_VariableInfo_SetOrRange candidates;
+  void* toolProvidedInfo;
+};
+
+struct Kokkos_Tools_VariableValue {
+  size_t type_id;
+  union Kokkos_Tools_VariableValue_ValueUnion value;
+  struct Kokkos_Tools_VariableInfo* metadata;
+};
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Tools_outputTypeDeclarationFunction)(
+    const char*, const size_t, struct Kokkos_Tools_VariableInfo* info);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Tools_inputTypeDeclarationFunction)(
+    const char*, const size_t, struct Kokkos_Tools_VariableInfo* info);
+
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Tools_requestValueFunction)(
+    const size_t, const size_t, const struct Kokkos_Tools_VariableValue*,
+    const size_t count, struct Kokkos_Tools_VariableValue*);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Tools_contextBeginFunction)(const size_t);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Tools_contextEndFunction)(
+    const size_t, struct Kokkos_Tools_VariableValue);
+// NOLINTNEXTLINE(modernize-use-using): C compatibility
+typedef void (*Kokkos_Tools_optimizationGoalDeclarationFunction)(
+    const size_t, const struct Kokkos_Tools_OptimzationGoal goal);
+
+struct Kokkos_Profiling_EventSet {
+  Kokkos_Profiling_initFunction init;
+  Kokkos_Profiling_finalizeFunction finalize;
+  Kokkos_Profiling_parseArgsFunction parse_args;
+  Kokkos_Profiling_printHelpFunction print_help;
+  Kokkos_Profiling_beginFunction begin_parallel_for;
+  Kokkos_Profiling_endFunction end_parallel_for;
+  Kokkos_Profiling_beginFunction begin_parallel_reduce;
+  Kokkos_Profiling_endFunction end_parallel_reduce;
+  Kokkos_Profiling_beginFunction begin_parallel_scan;
+  Kokkos_Profiling_endFunction end_parallel_scan;
+  Kokkos_Profiling_pushFunction push_region;
+  Kokkos_Profiling_popFunction pop_region;
+  Kokkos_Profiling_allocateDataFunction allocate_data;
+  Kokkos_Profiling_deallocateDataFunction deallocate_data;
+  Kokkos_Profiling_createProfileSectionFunction create_profile_section;
+  Kokkos_Profiling_startProfileSectionFunction start_profile_section;
+  Kokkos_Profiling_stopProfileSectionFunction stop_profile_section;
+  Kokkos_Profiling_destroyProfileSectionFunction destroy_profile_section;
+  Kokkos_Profiling_profileEventFunction profile_event;
+  Kokkos_Profiling_beginDeepCopyFunction begin_deep_copy;
+  Kokkos_Profiling_endDeepCopyFunction end_deep_copy;
+  Kokkos_Profiling_beginFenceFunction begin_fence;
+  Kokkos_Profiling_endFenceFunction end_fence;
+  Kokkos_Profiling_dualViewSyncFunction sync_dual_view;
+  Kokkos_Profiling_dualViewModifyFunction modify_dual_view;
+  Kokkos_Profiling_declareMetadataFunction declare_metadata;
+  Kokkos_Tools_provideToolProgrammingInterfaceFunction
+      provide_tool_programming_interface;
+  Kokkos_Tools_requestToolSettingsFunction request_tool_settings;
+  char profiling_padding[9 * sizeof(Kokkos_Tools_functionPointer)];
+  Kokkos_Tools_outputTypeDeclarationFunction declare_output_type;
+  Kokkos_Tools_inputTypeDeclarationFunction declare_input_type;
+  Kokkos_Tools_requestValueFunction request_output_values;
+  Kokkos_Tools_contextBeginFunction begin_tuning_context;
+  Kokkos_Tools_contextEndFunction end_tuning_context;
+  Kokkos_Tools_optimizationGoalDeclarationFunction declare_optimization_goal;
+  // Reserves room for 232 more Tools events without changing the struct
+  // layout (the EventSet then totals 275 function-pointer slots; see the
+  // static_assert in Kokkos_Profiling_Interface.hpp).
+  char padding[232 * sizeof(Kokkos_Tools_functionPointer)];
+};
+
+#endif  // KOKKOS_PROFILING_C_INTERFACE_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling_DeviceInfo.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling_DeviceInfo.hpp
new file mode 100644 (file)
index 0000000..be6f756
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ //@HEADER
+ // ************************************************************************
+ //
+ //                        Kokkos v. 3.0
+ //       Copyright (2020) National Technology & Engineering
+ //               Solutions of Sandia, LLC (NTESS).
+ //
+ // Under the terms of Contract DE-NA0003525 with NTESS,
+ // the U.S. Government retains certain rights in this software.
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // 1. Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ //
+ // 2. Redistributions in binary form must reproduce the above copyright
+ // notice, this list of conditions and the following disclaimer in the
+ // documentation and/or other materials provided with the distribution.
+ //
+ // 3. Neither the name of the Corporation nor the names of the
+ // contributors may be used to endorse or promote products derived from
+ // this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ //
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+ //
+ // ************************************************************************
+ //@HEADER
+*/
+
+#ifndef KOKKOSP_DEVICE_INFO_HPP
+#define KOKKOSP_DEVICE_INFO_HPP
+
+#include <cstdint>
+#include <impl/Kokkos_Profiling_C_Interface.h>
+namespace Kokkos {
+namespace Profiling {
+using KokkosPDeviceInfo = Kokkos_Profiling_KokkosPDeviceInfo;
+}  // namespace Profiling
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling_Interface.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Profiling_Interface.hpp
new file mode 100644 (file)
index 0000000..428a3cb
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ //@HEADER
+ // ************************************************************************
+ //
+ //                        Kokkos v. 3.0
+ //       Copyright (2020) National Technology & Engineering
+ //               Solutions of Sandia, LLC (NTESS).
+ //
+ // Under the terms of Contract DE-NA0003525 with NTESS,
+ // the U.S. Government retains certain rights in this software.
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // 1. Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ //
+ // 2. Redistributions in binary form must reproduce the above copyright
+ // notice, this list of conditions and the following disclaimer in the
+ // documentation and/or other materials provided with the distribution.
+ //
+ // 3. Neither the name of the Corporation nor the names of the
+ // contributors may be used to endorse or promote products derived from
+ // this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+ // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+ // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ //
+ // Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+ //
+ // ************************************************************************
+ //@HEADER
+ */
+
+#ifndef KOKKOSP_INTERFACE_HPP
+#define KOKKOSP_INTERFACE_HPP
+
+#include <cinttypes>
+#include <cstddef>
+#include <climits>
+
+#include <cstdlib>
+
+// NOTE: in this Kokkos::Profiling block, do not define anything that should
+// not exist when Profiling is disabled
+
+namespace Kokkos {
+namespace Tools {
+namespace Experimental {
+
+constexpr const uint32_t NumReservedDeviceIDs = 1;
+
+enum SpecialSynchronizationCases : int {
+  GlobalDeviceSynchronization     = 1,
+  DeepCopyResourceSynchronization = 2,
+};
+
+enum struct DeviceType {
+  Serial,
+  OpenMP,
+  Cuda,
+  HIP,
+  OpenMPTarget,
+  HPX,
+  Threads,
+  SYCL,
+  OpenACC,
+  Unknown
+};
+
+struct ExecutionSpaceIdentifier {
+  DeviceType type;
+  uint32_t device_id;
+  uint32_t instance_id;
+};
+
+constexpr const uint32_t num_type_bits     = 8;
+constexpr const uint32_t num_device_bits   = 7;
+constexpr const uint32_t num_instance_bits = 17;
+constexpr const uint32_t num_avail_bits    = sizeof(uint32_t) * CHAR_BIT;
+
+inline DeviceType devicetype_from_uint32t(const uint32_t in) {
+  switch (in) {
+    case 0: return DeviceType::Serial;
+    case 1: return DeviceType::OpenMP;
+    case 2: return DeviceType::Cuda;
+    case 3: return DeviceType::HIP;
+    case 4: return DeviceType::OpenMPTarget;
+    case 5: return DeviceType::HPX;
+    case 6: return DeviceType::Threads;
+    case 7: return DeviceType::SYCL;
+    case 8: return DeviceType::OpenACC;
+    default: return DeviceType::Unknown;  // TODO: error out?
+  }
+}
+
+inline ExecutionSpaceIdentifier identifier_from_devid(const uint32_t in) {
+  constexpr const uint32_t shift = num_avail_bits - num_type_bits;
+
+  return {devicetype_from_uint32t(in >> shift), /*First 8 bits*/
+          (~((uint32_t(-1)) << num_device_bits)) &
+              (in >> num_instance_bits),                  /*Next 7 bits */
+          (~((uint32_t(-1)) << num_instance_bits)) & in}; /*Last 17 bits*/
+}
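+// Worked example: for in = (2u << 24) | (3u << 17) | 42u, the top 8 bits
+// decode to DeviceType::Cuda, the next 7 bits give device_id == 3, and the
+// low 17 bits give instance_id == 42.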
+
+template <typename ExecutionSpace>
+struct DeviceTypeTraits;
+
+template <typename ExecutionSpace>
+constexpr uint32_t device_id_root() {
+  constexpr auto device_id =
+      static_cast<uint32_t>(DeviceTypeTraits<ExecutionSpace>::id);
+  return (device_id << (num_instance_bits + num_device_bits));
+}
+template <typename ExecutionSpace>
+inline uint32_t device_id(ExecutionSpace const& space) noexcept {
+  return device_id_root<ExecutionSpace>() +
+         (DeviceTypeTraits<ExecutionSpace>::device_id(space)
+          << num_instance_bits) +
+         space.impl_instance_id();
+}
+}  // namespace Experimental
+}  // namespace Tools
+}  // end namespace Kokkos
+
+#if defined(KOKKOS_ENABLE_LIBDL)
+// We check at configure time that libdl is available.
+#include <dlfcn.h>
+#endif
+
+#include <impl/Kokkos_Profiling_DeviceInfo.hpp>
+#include <impl/Kokkos_Profiling_C_Interface.h>
+
+namespace Kokkos {
+namespace Tools {
+
+using SpaceHandle = Kokkos_Profiling_SpaceHandle;
+
+}  // namespace Tools
+
+namespace Tools {
+
+namespace Experimental {
+using EventSet = Kokkos_Profiling_EventSet;
+static_assert(sizeof(EventSet) / sizeof(Kokkos_Tools_functionPointer) == 275,
+              "sizeof EventSet has changed, this is an error on the part of a "
+              "Kokkos developer");
+static_assert(sizeof(Kokkos_Tools_ToolSettings) / sizeof(bool) == 256,
+              "sizeof ToolSettings has changed, this is an error on the part "
+              "of a Kokkos developer");
+static_assert(sizeof(Kokkos_Tools_ToolProgrammingInterface) /
+                      sizeof(Kokkos_Tools_functionPointer) ==
+                  32,
+              "sizeof ToolProgrammingInterface has changed, this is an error "
+              "on the part of a Kokkos developer");
+
+using toolInvokedFenceFunction = Kokkos_Tools_toolInvokedFenceFunction;
+using provideToolProgrammingInterfaceFunction =
+    Kokkos_Tools_provideToolProgrammingInterfaceFunction;
+using requestToolSettingsFunction = Kokkos_Tools_requestToolSettingsFunction;
+using ToolSettings                = Kokkos_Tools_ToolSettings;
+using ToolProgrammingInterface    = Kokkos_Tools_ToolProgrammingInterface;
+}  // namespace Experimental
+using initFunction           = Kokkos_Profiling_initFunction;
+using finalizeFunction       = Kokkos_Profiling_finalizeFunction;
+using parseArgsFunction      = Kokkos_Profiling_parseArgsFunction;
+using printHelpFunction      = Kokkos_Profiling_printHelpFunction;
+using beginFunction          = Kokkos_Profiling_beginFunction;
+using endFunction            = Kokkos_Profiling_endFunction;
+using pushFunction           = Kokkos_Profiling_pushFunction;
+using popFunction            = Kokkos_Profiling_popFunction;
+using allocateDataFunction   = Kokkos_Profiling_allocateDataFunction;
+using deallocateDataFunction = Kokkos_Profiling_deallocateDataFunction;
+using createProfileSectionFunction =
+    Kokkos_Profiling_createProfileSectionFunction;
+using startProfileSectionFunction =
+    Kokkos_Profiling_startProfileSectionFunction;
+using stopProfileSectionFunction = Kokkos_Profiling_stopProfileSectionFunction;
+using destroyProfileSectionFunction =
+    Kokkos_Profiling_destroyProfileSectionFunction;
+using profileEventFunction    = Kokkos_Profiling_profileEventFunction;
+using beginDeepCopyFunction   = Kokkos_Profiling_beginDeepCopyFunction;
+using endDeepCopyFunction     = Kokkos_Profiling_endDeepCopyFunction;
+using beginFenceFunction      = Kokkos_Profiling_beginFenceFunction;
+using endFenceFunction        = Kokkos_Profiling_endFenceFunction;
+using dualViewSyncFunction    = Kokkos_Profiling_dualViewSyncFunction;
+using dualViewModifyFunction  = Kokkos_Profiling_dualViewModifyFunction;
+using declareMetadataFunction = Kokkos_Profiling_declareMetadataFunction;
+
+}  // namespace Tools
+
+}  // namespace Kokkos
+
+// Profiling
+
+namespace Kokkos {
+
+namespace Profiling {
+
+/** The Profiling namespace is being renamed to Tools.
+ * This is reexposing the contents of what used to be the Profiling
+ * Interface with their original names, to avoid breaking old code
+ */
+
+namespace Experimental {
+
+using Kokkos::Tools::Experimental::device_id;
+using Kokkos::Tools::Experimental::DeviceType;
+using Kokkos::Tools::Experimental::DeviceTypeTraits;
+
+}  // namespace Experimental
+
+using Kokkos::Tools::allocateDataFunction;
+using Kokkos::Tools::beginDeepCopyFunction;
+using Kokkos::Tools::beginFunction;
+using Kokkos::Tools::createProfileSectionFunction;
+using Kokkos::Tools::deallocateDataFunction;
+using Kokkos::Tools::destroyProfileSectionFunction;
+using Kokkos::Tools::endDeepCopyFunction;
+using Kokkos::Tools::endFunction;
+using Kokkos::Tools::finalizeFunction;
+using Kokkos::Tools::initFunction;
+using Kokkos::Tools::parseArgsFunction;
+using Kokkos::Tools::popFunction;
+using Kokkos::Tools::printHelpFunction;
+using Kokkos::Tools::profileEventFunction;
+using Kokkos::Tools::pushFunction;
+using Kokkos::Tools::SpaceHandle;
+using Kokkos::Tools::startProfileSectionFunction;
+using Kokkos::Tools::stopProfileSectionFunction;
+
+}  // namespace Profiling
+}  // namespace Kokkos
+
+// Tuning
+
+namespace Kokkos {
+namespace Tools {
+namespace Experimental {
+using ValueSet            = Kokkos_Tools_ValueSet;
+using ValueRange          = Kokkos_Tools_ValueRange;
+using StatisticalCategory = Kokkos_Tools_VariableInfo_StatisticalCategory;
+using ValueType           = Kokkos_Tools_VariableInfo_ValueType;
+using CandidateValueType  = Kokkos_Tools_VariableInfo_CandidateValueType;
+using SetOrRange          = Kokkos_Tools_VariableInfo_SetOrRange;
+using VariableInfo        = Kokkos_Tools_VariableInfo;
+using OptimizationGoal    = Kokkos_Tools_OptimzationGoal;
+using TuningString        = Kokkos_Tools_Tuning_String;
+using VariableValue       = Kokkos_Tools_VariableValue;
+
+using outputTypeDeclarationFunction =
+    Kokkos_Tools_outputTypeDeclarationFunction;
+using inputTypeDeclarationFunction = Kokkos_Tools_inputTypeDeclarationFunction;
+using requestValueFunction         = Kokkos_Tools_requestValueFunction;
+using contextBeginFunction         = Kokkos_Tools_contextBeginFunction;
+using contextEndFunction           = Kokkos_Tools_contextEndFunction;
+using optimizationGoalDeclarationFunction =
+    Kokkos_Tools_optimizationGoalDeclarationFunction;
+}  // end namespace Experimental
+}  // end namespace Tools
+
+}  // end namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_QuadPrecisionMath.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_QuadPrecisionMath.hpp
new file mode 100644 (file)
index 0000000..c7936e9
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_QUAD_PRECISION_MATH_HPP
+#define KOKKOS_QUAD_PRECISION_MATH_HPP
+
+#include <Kokkos_Macros.hpp>
+
+#if defined(KOKKOS_ENABLE_LIBQUADMATH)
+
+#include <Kokkos_NumericTraits.hpp>
+#include <Kokkos_MathematicalConstants.hpp>
+#include <Kokkos_MathematicalFunctions.hpp>
+
+#include <quadmath.h>
+
+#if !(defined(__FLOAT128__) || defined(__SIZEOF_FLOAT128__))
+#error __float128 not supported on this host
+#endif
+
+//<editor-fold desc="numeric traits __float128 specializations">
+namespace Kokkos {
+namespace Experimental {
+#if defined(KOKKOS_ENABLE_CXX17)
+#define KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(TRAIT, TYPE, VALUE_TYPE, VALUE) \
+  template <>                                                                \
+  struct TRAIT<TYPE> {                                                       \
+    static constexpr VALUE_TYPE value = VALUE;                               \
+  };                                                                         \
+  template <>                                                                \
+  inline constexpr auto TRAIT##_v<TYPE> = TRAIT<TYPE>::value;
+#else
+#define KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(TRAIT, TYPE, VALUE_TYPE, VALUE) \
+  template <>                                                                \
+  struct TRAIT<TYPE> {                                                       \
+    static constexpr VALUE_TYPE value = VALUE;                               \
+  };
+#endif
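+
+// For reference, KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(epsilon, __float128,
+// __float128, FLT128_EPSILON) expands (under C++17) to:
+//
+//   template <>
+//   struct epsilon<__float128> {
+//     static constexpr __float128 value = FLT128_EPSILON;
+//   };
+//   template <>
+//   inline constexpr auto epsilon_v<__float128> = epsilon<__float128>::value;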
+
+// clang-format off
+// Numeric distinguished value traits
+// Workaround GCC bug https://godbolt.org/z/qWb5oe4dx
+// error: '__builtin_huge_valq()' is not a constant expression
+#if defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU >= 710)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(infinity,       __float128, __float128, HUGE_VALQ)
+#endif
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(finite_min,     __float128, __float128, -FLT128_MAX)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(finite_max,     __float128, __float128, FLT128_MAX)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(epsilon,        __float128, __float128, FLT128_EPSILON)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(round_error,    __float128, __float128, static_cast<__float128>(0.5))
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(norm_min,       __float128, __float128, FLT128_MIN)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(denorm_min,     __float128, __float128, FLT128_DENORM_MIN)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(reciprocal_overflow_threshold, __float128, __float128, FLT128_MIN)
+#if defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU >= 710)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(quiet_NaN,      __float128, __float128, __builtin_nanq(""))
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(signaling_NaN,  __float128, __float128, __builtin_nansq(""))
+#endif
+
+// Numeric characteristics traits
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(digits,         __float128,        int, FLT128_MANT_DIG)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(digits10,       __float128,        int, FLT128_DIG)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(max_digits10,   __float128,        int, 36)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(radix,          __float128,        int, 2)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(min_exponent,   __float128,        int, FLT128_MIN_EXP)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(max_exponent,   __float128,        int, FLT128_MAX_EXP)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(min_exponent10, __float128,        int, FLT128_MIN_10_EXP)
+KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT(max_exponent10, __float128,        int, FLT128_MAX_10_EXP)
+// clang-format on
+
+#undef KOKKOS_IMPL_SPECIALIZE_NUMERIC_TRAIT
+}  // namespace Experimental
+}  // namespace Kokkos
+//</editor-fold>
+
+namespace Kokkos {
+template <>
+struct reduction_identity<__float128> {
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static __float128 sum() {
+    return static_cast<__float128>(0.0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static __float128 prod() {
+    return static_cast<__float128>(1.0);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static __float128 max() {
+    return -FLT128_MAX;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr static __float128 min() {
+    return FLT128_MAX;
+  }
+};
+}  // namespace Kokkos
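+
+// Minimal usage sketch for the specialization above (illustrative only;
+// assumes Kokkos has been initialized and `values` is a host-accessible
+// array of __float128):
+//
+//   __float128 sum = 0;
+//   Kokkos::parallel_reduce(
+//       n, KOKKOS_LAMBDA(int i, __float128& acc) { acc += values[i]; }, sum);
+//
+// Each per-thread accumulator is seeded with
+// reduction_identity<__float128>::sum(), i.e. static_cast<__float128>(0.0).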
+
+//<editor-fold desc="Common mathematical functions __float128 overloads">
+namespace Kokkos {
+// clang-format off
+namespace Impl {
+template <> struct promote<__float128> { using type = __float128; };
+}
+// Basic operations
+inline __float128 abs(__float128 x) { return ::fabsq(x); }
+inline __float128 fabs(__float128 x) { return ::fabsq(x); }
+inline __float128 fmod(__float128 x, __float128 y) { return ::fmodq(x, y); }
+inline __float128 remainder(__float128 x, __float128 y) { return ::remainderq(x, y); }
+// remquo
+// fma
+inline __float128 fmax(__float128 x, __float128 y) { return ::fmaxq(x, y); }
+inline __float128 fmin(__float128 x, __float128 y) { return ::fminq(x, y); }
+inline __float128 fdim(__float128 x, __float128 y) { return ::fdimq(x, y); }
+inline __float128 nanq(char const* arg) { return ::nanq(arg); }
+// Exponential functions
+inline __float128 exp(__float128 x) { return ::expq(x); }
+#if defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU >= 910)
+inline __float128 exp2(__float128 x) { return ::exp2q(x); }
+#endif
+inline __float128 expm1(__float128 x) { return ::expm1q(x); }
+inline __float128 log(__float128 x) { return ::logq(x); }
+inline __float128 log10(__float128 x) { return ::log10q(x); }
+inline __float128 log2(__float128 x) { return ::log2q(x); }
+inline __float128 log1p(__float128 x) { return ::log1pq(x); }
+// Power functions
+inline __float128 pow(__float128 x, __float128 y) { return ::powq(x, y); }
+inline __float128 sqrt(__float128 x) { return ::sqrtq(x); }
+inline __float128 cbrt(__float128 x) { return ::cbrtq(x); }
+inline __float128 hypot(__float128 x, __float128 y) { return ::hypotq(x, y); }
+// Trigonometric functions
+inline __float128 sin(__float128 x) { return ::sinq(x); }
+inline __float128 cos(__float128 x) { return ::cosq(x); }
+inline __float128 tan(__float128 x) { return ::tanq(x); }
+inline __float128 asin(__float128 x) { return ::asinq(x); }
+inline __float128 acos(__float128 x) { return ::acosq(x); }
+inline __float128 atan(__float128 x) { return ::atanq(x); }
+inline __float128 atan2(__float128 x, __float128 y) { return ::atan2q(x, y); }
+// Hyperbolic functions
+inline __float128 sinh(__float128 x) { return ::sinhq(x); }
+inline __float128 cosh(__float128 x) { return ::coshq(x); }
+inline __float128 tanh(__float128 x) { return ::tanhq(x); }
+inline __float128 asinh(__float128 x) { return ::asinhq(x); }
+inline __float128 acosh(__float128 x) { return ::acoshq(x); }
+inline __float128 atanh(__float128 x) { return ::atanhq(x); }
+// Error and gamma functions
+inline __float128 erf(__float128 x) { return ::erfq(x); }
+inline __float128 erfc(__float128 x) { return ::erfcq(x); }
+inline __float128 tgamma(__float128 x) { return ::tgammaq(x); }
+inline __float128 lgamma(__float128 x) { return ::lgammaq(x); }
+// Nearest integer floating point operations
+inline __float128 ceil(__float128 x) { return ::ceilq(x); }
+inline __float128 floor(__float128 x) { return ::floorq(x); }
+inline __float128 trunc(__float128 x) { return ::truncq(x); }
+inline __float128 round(__float128 x) { return ::roundq(x); }
+// lround
+// llround
+inline __float128 nearbyint(__float128 x) { return ::nearbyintq(x); }
+// rint
+// lrint
+// llrint
+// Floating point manipulation functions
+// frexp
+// ldexp
+// modf
+// scalbn
+// scalbln
+// ilog
+#if defined(KOKKOS_COMPILER_GNU) && (KOKKOS_COMPILER_GNU >= 610)
+inline __float128 logb(__float128 x) { return ::logbq(x); }
+#endif
+inline __float128 nextafter(__float128 x, __float128 y) { return ::nextafterq(x, y); }
+// nexttoward
+inline __float128 copysign(__float128 x, __float128 y) { return ::copysignq(x, y); }
+// Classification and comparison
+// fpclassify
+inline bool isfinite(__float128 x) { return !::isinfq(x); }  // isfiniteq not provided
+inline bool isinf(__float128 x) { return ::isinfq(x); }
+inline bool isnan(__float128 x) { return ::isnanq(x); }
+// isnormal
+inline bool signbit(__float128 x) { return ::signbitq(x); }
+// isgreater
+// isgreaterequal
+// isless
+// islessequal
+// islessgreater
+// isunordered
+// clang-format on
+}  // namespace Kokkos
+//</editor-fold>
+
+//<editor-fold desc="Mathematical constants __float128 specializations">
+namespace Kokkos {
+namespace Experimental {
+// clang-format off
+template <> constexpr __float128 e_v         <__float128> = 2.718281828459045235360287471352662498Q;
+template <> constexpr __float128 log2e_v     <__float128> = 1.442695040888963407359924681001892137Q;
+template <> constexpr __float128 log10e_v    <__float128> = 0.434294481903251827651128918916605082Q;
+template <> constexpr __float128 pi_v        <__float128> = 3.141592653589793238462643383279502884Q;
+template <> constexpr __float128 inv_pi_v    <__float128> = 0.318309886183790671537767526745028724Q;
+template <> constexpr __float128 inv_sqrtpi_v<__float128> = 0.564189583547756286948079451560772586Q;
+template <> constexpr __float128 ln2_v       <__float128> = 0.693147180559945309417232121458176568Q;
+template <> constexpr __float128 ln10_v      <__float128> = 2.302585092994045684017991454684364208Q;
+template <> constexpr __float128 sqrt2_v     <__float128> = 1.414213562373095048801688724209698079Q;
+template <> constexpr __float128 sqrt3_v     <__float128> = 1.732050807568877293527446341505872367Q;
+template <> constexpr __float128 inv_sqrt3_v <__float128> = 0.577350269189625764509148780501957456Q;
+template <> constexpr __float128 egamma_v    <__float128> = 0.577215664901532860606512090082402431Q;
+template <> constexpr __float128 phi_v       <__float128> = 1.618033988749894848204586834365638118Q;
+// clang-format on
+}  // namespace Experimental
+}  // namespace Kokkos
+//</editor-fold>
+
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_SharedAlloc.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_SharedAlloc.cpp
new file mode 100644 (file)
index 0000000..aff6332
--- /dev/null
@@ -0,0 +1,377 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Core.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+thread_local int SharedAllocationRecord<void, void>::t_tracking_enabled = 1;
+
+#ifdef KOKKOS_ENABLE_DEBUG
+bool SharedAllocationRecord<void, void>::is_sane(
+    SharedAllocationRecord<void, void>* arg_record) {
+  SharedAllocationRecord* const root =
+      arg_record ? arg_record->m_root : nullptr;
+
+  bool ok = root != nullptr && root->use_count() == 0;
+
+  if (ok) {
+    SharedAllocationRecord* root_next             = nullptr;
+    static constexpr SharedAllocationRecord* zero = nullptr;
+    // Lock the list:
+    while ((root_next = Kokkos::atomic_exchange(&root->m_next, zero)) ==
+           nullptr)
+      ;
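+    // (m_next == nullptr doubles as the "locked" state: whichever thread
+    // swaps out a non-null head owns the list until it restores it below.)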
+
+    for (SharedAllocationRecord* rec = root_next; ok && rec != root;
+         rec                         = rec->m_next) {
+      const bool ok_non_null =
+          rec && rec->m_prev && (rec == root || rec->m_next);
+      const bool ok_root = ok_non_null && rec->m_root == root;
+      const bool ok_prev_next =
+          ok_non_null &&
+          (rec->m_prev != root ? rec->m_prev->m_next == rec : root_next == rec);
+      const bool ok_next_prev = ok_non_null && rec->m_next->m_prev == rec;
+      const bool ok_count     = ok_non_null && 0 <= rec->use_count();
+
+      ok = ok_root && ok_prev_next && ok_next_prev && ok_count;
+
+      if (!ok) {
+        // Formatting dependent on sizeof(uintptr_t)
+        const char* format_string;
+
+        if (sizeof(uintptr_t) == sizeof(unsigned long)) {
+          format_string =
+              "Kokkos::Impl::SharedAllocationRecord failed is_sane: "
+              "rec(0x%.12lx){ m_count(%d) m_root(0x%.12lx) m_next(0x%.12lx) "
+              "m_prev(0x%.12lx) m_next->m_prev(0x%.12lx) "
+              "m_prev->m_next(0x%.12lx) }\n";
+        } else if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
+          format_string =
+              "Kokkos::Impl::SharedAllocationRecord failed is_sane: "
+              "rec(0x%.12llx){ m_count(%d) m_root(0x%.12llx) m_next(0x%.12llx) "
+              "m_prev(0x%.12llx) m_next->m_prev(0x%.12llx) "
+              "m_prev->m_next(0x%.12llx) }\n";
+        }
+
+        fprintf(stderr, format_string, reinterpret_cast<uintptr_t>(rec),
+                rec->use_count(), reinterpret_cast<uintptr_t>(rec->m_root),
+                reinterpret_cast<uintptr_t>(rec->m_next),
+                reinterpret_cast<uintptr_t>(rec->m_prev),
+                reinterpret_cast<uintptr_t>(
+                    rec->m_next != nullptr ? rec->m_next->m_prev : nullptr),
+                reinterpret_cast<uintptr_t>(rec->m_prev != rec->m_root
+                                                ? rec->m_prev->m_next
+                                                : root_next));
+      }
+    }
+
+    if (nullptr != Kokkos::atomic_exchange(&root->m_next, root_next)) {
+      Kokkos::Impl::throw_runtime_exception(
+          "Kokkos::Impl::SharedAllocationRecord failed is_sane unlocking");
+    }
+  }
+  return ok;
+}
+
+#else
+
+bool SharedAllocationRecord<void, void>::is_sane(
+    SharedAllocationRecord<void, void>*) {
+  Kokkos::Impl::throw_runtime_exception(
+      "Kokkos::Impl::SharedAllocationRecord::is_sane only works with "
+      "KOKKOS_ENABLE_DEBUG enabled");
+  return false;
+}
+#endif  // KOKKOS_ENABLE_DEBUG
+
+#ifdef KOKKOS_ENABLE_DEBUG
+SharedAllocationRecord<void, void>* SharedAllocationRecord<void, void>::find(
+    SharedAllocationRecord<void, void>* const arg_root,
+    void* const arg_data_ptr) {
+  SharedAllocationRecord* root_next             = nullptr;
+  static constexpr SharedAllocationRecord* zero = nullptr;
+
+  // Lock the list:
+  while ((root_next = Kokkos::atomic_exchange(&arg_root->m_next, zero)) ==
+         nullptr)
+    ;
+
+  // Iterate searching for the record with this data pointer
+
+  SharedAllocationRecord* r = root_next;
+
+  while ((r != arg_root) && (r->data() != arg_data_ptr)) {
+    r = r->m_next;
+  }
+
+  if (r == arg_root) {
+    r = nullptr;
+  }
+
+  if (nullptr != Kokkos::atomic_exchange(&arg_root->m_next, root_next)) {
+    Kokkos::Impl::throw_runtime_exception(
+        "Kokkos::Impl::SharedAllocationRecord failed locking/unlocking");
+  }
+  return r;
+}
+#else
+SharedAllocationRecord<void, void>* SharedAllocationRecord<void, void>::find(
+    SharedAllocationRecord<void, void>* const, void* const) {
+  Kokkos::Impl::throw_runtime_exception(
+      "Kokkos::Impl::SharedAllocationRecord::find only works with "
+      "KOKKOS_ENABLE_DEBUG "
+      "enabled");
+  return nullptr;
+}
+#endif
+
+/**\brief  Construct and insert into 'arg_root' tracking set.
+ *         use_count is zero.
+ */
+SharedAllocationRecord<void, void>::SharedAllocationRecord(
+#ifdef KOKKOS_ENABLE_DEBUG
+    SharedAllocationRecord<void, void>* arg_root,
+#endif
+    SharedAllocationHeader* arg_alloc_ptr, size_t arg_alloc_size,
+    SharedAllocationRecord<void, void>::function_type arg_dealloc,
+    const std::string& label)
+    : m_alloc_ptr(arg_alloc_ptr),
+      m_alloc_size(arg_alloc_size),
+      m_dealloc(arg_dealloc)
+#ifdef KOKKOS_ENABLE_DEBUG
+      ,
+      m_root(arg_root),
+      m_prev(nullptr),
+      m_next(nullptr)
+#endif
+      ,
+      m_count(0),
+      m_label(label) {
+  if (nullptr != arg_alloc_ptr) {
+#ifdef KOKKOS_ENABLE_DEBUG
+    // Insert into the root double-linked list for tracking
+    //
+    // before:  arg_root->m_next == next ; next->m_prev == arg_root
+    // after:   arg_root->m_next == this ; this->m_prev == arg_root ;
+    //              this->m_next == next ; next->m_prev == this
+
+    m_prev                                        = m_root;
+    static constexpr SharedAllocationRecord* zero = nullptr;
+
+    // Read root->m_next and lock by setting to nullptr
+    while ((m_next = Kokkos::atomic_exchange(&m_root->m_next, zero)) == nullptr)
+      ;
+
+    m_next->m_prev = this;
+
+    // memory fence before completing insertion into linked list
+    Kokkos::memory_fence();
+
+    if (nullptr != Kokkos::atomic_exchange(&m_root->m_next, this)) {
+      Kokkos::Impl::throw_runtime_exception(
+          "Kokkos::Impl::SharedAllocationRecord failed locking/unlocking");
+    }
+#endif
+
+  } else {
+    Kokkos::Impl::throw_runtime_exception(
+        "Kokkos::Impl::SharedAllocationRecord given nullptr allocation");
+  }
+}
+
+void SharedAllocationRecord<void, void>::increment(
+    SharedAllocationRecord<void, void>* arg_record) {
+  const int old_count = Kokkos::atomic_fetch_add(&arg_record->m_count, 1);
+
+  if (old_count < 0) {  // Error
+    Kokkos::Impl::throw_runtime_exception(
+        "Kokkos::Impl::SharedAllocationRecord failed increment");
+  }
+}
+
+SharedAllocationRecord<void, void>* SharedAllocationRecord<
+    void, void>::decrement(SharedAllocationRecord<void, void>* arg_record) {
+  const int old_count = Kokkos::atomic_fetch_sub(&arg_record->m_count, 1);
+
+  if (old_count == 1) {
+    if (is_finalized()) {
+      std::stringstream ss;
+      ss << "Kokkos allocation \"";
+      ss << arg_record->get_label();
+      ss << "\" is being deallocated after Kokkos::finalize was called\n";
+      auto s = ss.str();
+      Kokkos::Impl::throw_runtime_exception(s);
+    }
+
+#ifdef KOKKOS_ENABLE_DEBUG
+    // before:  arg_record->m_prev->m_next == arg_record  &&
+    //          arg_record->m_next->m_prev == arg_record
+    //
+    // after:   arg_record->m_prev->m_next == arg_record->m_next  &&
+    //          arg_record->m_next->m_prev == arg_record->m_prev
+
+    SharedAllocationRecord* root_next             = nullptr;
+    static constexpr SharedAllocationRecord* zero = nullptr;
+
+    // Lock the list:
+    while ((root_next = Kokkos::atomic_exchange(&arg_record->m_root->m_next,
+                                                zero)) == nullptr)
+      ;
+    // We need a memory_fence() here so that the following update
+    // is properly sequenced
+    Kokkos::memory_fence();
+
+    arg_record->m_next->m_prev = arg_record->m_prev;
+
+    if (root_next != arg_record) {
+      arg_record->m_prev->m_next = arg_record->m_next;
+    } else {
+      // before:  arg_record->m_root == arg_record->m_prev
+      // after:   arg_record->m_root == arg_record->m_next
+      root_next = arg_record->m_next;
+    }
+
+    Kokkos::memory_fence();
+
+    // Unlock the list:
+    if (nullptr !=
+        Kokkos::atomic_exchange(&arg_record->m_root->m_next, root_next)) {
+      Kokkos::Impl::throw_runtime_exception(
+          "Kokkos::Impl::SharedAllocationRecord failed decrement unlocking");
+    }
+
+    arg_record->m_next = nullptr;
+    arg_record->m_prev = nullptr;
+#endif
+
+    function_type d = arg_record->m_dealloc;
+    (*d)(arg_record);
+    arg_record = nullptr;
+  } else if (old_count < 1) {  // Error
+    fprintf(stderr,
+            "Kokkos::Impl::SharedAllocationRecord '%s' failed decrement count "
+            "= %d\n",
+            arg_record->m_alloc_ptr->m_label, old_count);
+    fflush(stderr);
+    Kokkos::Impl::throw_runtime_exception(
+        "Kokkos::Impl::SharedAllocationRecord failed decrement count");
+  }
+
+  return arg_record;
+}
+
+#ifdef KOKKOS_ENABLE_DEBUG
+void SharedAllocationRecord<void, void>::print_host_accessible_records(
+    std::ostream& s, const char* const space_name,
+    const SharedAllocationRecord* const root, const bool detail) {
+  const SharedAllocationRecord<void, void>* r = root;
+
+  char buffer[256];
+
+  if (detail) {
+    do {
+      // Formatting dependent on sizeof(uintptr_t)
+      const char* format_string;
+
+      if (sizeof(uintptr_t) == sizeof(unsigned long)) {
+        format_string =
+            "%s addr( 0x%.12lx ) list( 0x%.12lx 0x%.12lx ) extent[ 0x%.12lx + "
+            "%.8ld ] count(%d) dealloc(0x%.12lx) %s\n";
+      } else if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
+        format_string =
+            "%s addr( 0x%.12llx ) list( 0x%.12llx 0x%.12llx ) extent[ "
+            "0x%.12llx + %.8ld ] count(%d) dealloc(0x%.12llx) %s\n";
+      }
+
+      snprintf(buffer, 256, format_string, space_name,
+               reinterpret_cast<uintptr_t>(r),
+               reinterpret_cast<uintptr_t>(r->m_prev),
+               reinterpret_cast<uintptr_t>(r->m_next),
+               reinterpret_cast<uintptr_t>(r->m_alloc_ptr), r->m_alloc_size,
+               r->use_count(), reinterpret_cast<uintptr_t>(r->m_dealloc),
+               r->m_alloc_ptr->m_label);
+      s << buffer;
+      r = r->m_next;
+    } while (r != root);
+  } else {
+    do {
+      if (r->m_alloc_ptr) {
+        // Formatting dependent on sizeof(uintptr_t)
+        const char* format_string;
+
+        if (sizeof(uintptr_t) == sizeof(unsigned long)) {
+          format_string = "%s [ 0x%.12lx + %ld ] %s\n";
+        } else if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
+          format_string = "%s [ 0x%.12llx + %ld ] %s\n";
+        }
+
+        snprintf(buffer, 256, format_string, space_name,
+                 reinterpret_cast<uintptr_t>(r->data()), r->size(),
+                 r->m_alloc_ptr->m_label);
+      } else {
+        snprintf(buffer, 256, "%s [ 0 + 0 ]\n", space_name);
+      }
+      s << buffer;
+      r = r->m_next;
+    } while (r != root);
+  }
+}
+#else
+void SharedAllocationRecord<void, void>::print_host_accessible_records(
+    std::ostream&, const char* const, const SharedAllocationRecord* const,
+    const bool) {
+  Kokkos::Impl::throw_runtime_exception(
+      "Kokkos::Impl::SharedAllocationRecord::print_host_accessible_records"
+      " only works with KOKKOS_ENABLE_DEBUG enabled");
+}
+#endif
+
+} /* namespace Impl */
+} /* namespace Kokkos */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_SharedAlloc.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_SharedAlloc.hpp
new file mode 100644 (file)
index 0000000..02dcd1e
--- /dev/null
@@ -0,0 +1,539 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SHARED_ALLOC_HPP
+#define KOKKOS_SHARED_ALLOC_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Core_fwd.hpp>
+#include <impl/Kokkos_Error.hpp>  // Impl::throw_runtime_exception
+
+#include <cstdint>
+#include <string>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class MemorySpace = void, class DestroyFunctor = void>
+class SharedAllocationRecord;
+
+template <class MemorySpace>
+class SharedAllocationRecordCommon;
+
+class SharedAllocationHeader {
+ private:
+  using Record = SharedAllocationRecord<void, void>;
+
+#ifdef KOKKOS_ARCH_VEGA
+  static constexpr unsigned maximum_label_length =
+      (1u << 8 /* 256 */) - sizeof(Record*);
+#else
+  static constexpr unsigned maximum_label_length =
+      (1u << 7 /* 128 */) - sizeof(Record*);
+#endif
+
+  template <class, class>
+  friend class SharedAllocationRecord;
+  template <class>
+  friend class SharedAllocationRecordCommon;
+  template <class>
+  friend class HostInaccessibleSharedAllocationRecordCommon;
+
+  Record* m_record;
+  char m_label[maximum_label_length];
+
+ public:
+  /* Given user memory get pointer to the header */
+  KOKKOS_INLINE_FUNCTION static const SharedAllocationHeader* get_header(
+      void const* alloc_ptr) {
+    return reinterpret_cast<SharedAllocationHeader const*>(
+        static_cast<char const*>(alloc_ptr) - sizeof(SharedAllocationHeader));
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  const char* label() const { return m_label; }
+};
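+
+// An allocation is laid out as [ SharedAllocationHeader | user data ], so
+// get_header() simply steps back sizeof(SharedAllocationHeader) bytes from
+// the user pointer; SharedAllocationRecord::data() performs the inverse step.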
+
+template <>
+class SharedAllocationRecord<void, void> {
+ protected:
+#ifdef KOKKOS_ARCH_VEGA
+  static_assert(sizeof(SharedAllocationHeader) == (1u << 8 /* 256 */),
+                "sizeof(SharedAllocationHeader) != 256");
+#else
+  static_assert(sizeof(SharedAllocationHeader) == (1u << 7 /* 128 */),
+                "sizeof(SharedAllocationHeader) != 128");
+#endif
+
+  template <class, class>
+  friend class SharedAllocationRecord;
+  template <class>
+  friend class SharedAllocationRecordCommon;
+  template <class>
+  friend class HostInaccessibleSharedAllocationRecordCommon;
+
+  using function_type = void (*)(SharedAllocationRecord<void, void>*);
+
+  SharedAllocationHeader* const m_alloc_ptr;
+  size_t const m_alloc_size;
+  function_type const m_dealloc;
+#ifdef KOKKOS_ENABLE_DEBUG
+  SharedAllocationRecord* const m_root;
+  SharedAllocationRecord* m_prev;
+  SharedAllocationRecord* m_next;
+#endif
+  int m_count;
+  std::string m_label;
+
+  SharedAllocationRecord(SharedAllocationRecord&&)      = delete;
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(SharedAllocationRecord&&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+  /**\brief  Construct and insert into 'arg_root' tracking set.
+   *         use_count is zero.
+   */
+  SharedAllocationRecord(
+#ifdef KOKKOS_ENABLE_DEBUG
+      SharedAllocationRecord* arg_root,
+#endif
+      SharedAllocationHeader* arg_alloc_ptr, size_t arg_alloc_size,
+      function_type arg_dealloc, const std::string& label);
+
+ private:
+  static thread_local int t_tracking_enabled;
+
+ public:
+  virtual std::string get_label() const { return std::string("Unmanaged"); }
+
+#if defined(__EDG__) && !defined(KOKKOS_COMPILER_INTEL)
+#pragma push
+#pragma diag_suppress implicit_return_from_non_void_function
+#endif
+  static KOKKOS_FUNCTION int tracking_enabled() {
+    KOKKOS_IF_ON_HOST(return t_tracking_enabled;)
+    KOKKOS_IF_ON_DEVICE(return 0;)
+  }
+#if defined(__EDG__) && !defined(KOKKOS_COMPILER_INTEL)
+#pragma pop
+#endif
+
+  /**\brief A host process thread claims and disables the
+   *        shared allocation tracking flag.
+   */
+  static void tracking_disable() { t_tracking_enabled = 0; }
+
+  /**\brief A host process thread releases and enables the
+   *        shared allocation tracking flag.
+   */
+  static void tracking_enable() { t_tracking_enabled = 1; }
+
+  virtual ~SharedAllocationRecord() = default;
+
+  SharedAllocationRecord()
+      : m_alloc_ptr(nullptr),
+        m_alloc_size(0),
+        m_dealloc(nullptr)
+#ifdef KOKKOS_ENABLE_DEBUG
+        ,
+        m_root(this),
+        m_prev(this),
+        m_next(this)
+#endif
+        ,
+        m_count(0) {
+  }
+
+  static constexpr unsigned maximum_label_length =
+      SharedAllocationHeader::maximum_label_length;
+
+  KOKKOS_INLINE_FUNCTION
+  const SharedAllocationHeader* head() const { return m_alloc_ptr; }
+
+  /* User's memory begins at the end of the header */
+  KOKKOS_INLINE_FUNCTION
+  void* data() const { return static_cast<void*>(m_alloc_ptr + 1); }
+
+  /* User's memory begins at the end of the header */
+  size_t size() const { return m_alloc_size - sizeof(SharedAllocationHeader); }
+
+  /* Cannot be 'constexpr' because 'm_count' is volatile */
+  int use_count() const { return *static_cast<const volatile int*>(&m_count); }
+
+  /* Increment use count */
+  static void increment(SharedAllocationRecord*);
+
+  /* Decrement use count. If 1->0 then remove from the tracking list and invoke
+   * m_dealloc */
+  static SharedAllocationRecord* decrement(SharedAllocationRecord*);
+
+  /* Given a root record and data pointer find the record */
+  static SharedAllocationRecord* find(SharedAllocationRecord* const,
+                                      void* const);
+
+  /*  Sanity check for the whole set of records to which the input record
+   * belongs. Locks the set's insert/erase operations until the sanity check is
+   * complete.
+   */
+  static bool is_sane(SharedAllocationRecord*);
+
+  /*  Print host-accessible records */
+  static void print_host_accessible_records(
+      std::ostream&, const char* const space_name,
+      const SharedAllocationRecord* const root, const bool detail);
+};
+
+template <class MemorySpace>
+class SharedAllocationRecordCommon : public SharedAllocationRecord<void, void> {
+ private:
+  using derived_t     = SharedAllocationRecord<MemorySpace, void>;
+  using record_base_t = SharedAllocationRecord<void, void>;
+  derived_t& self() { return *static_cast<derived_t*>(this); }
+  derived_t const& self() const { return *static_cast<derived_t const*>(this); }
+
+ protected:
+  using record_base_t::record_base_t;
+
+  void _fill_host_accessible_header_info(SharedAllocationHeader& arg_header,
+                                         std::string const& arg_label);
+
+  static void deallocate(record_base_t* arg_rec);
+
+ public:
+  static auto allocate(MemorySpace const& arg_space,
+                       std::string const& arg_label, size_t arg_alloc_size)
+      -> derived_t*;
+  /**\brief  Allocate tracked memory in the space */
+  static void* allocate_tracked(MemorySpace const& arg_space,
+                                std::string const& arg_alloc_label,
+                                size_t arg_alloc_size);
+  /**\brief  Deallocate tracked memory in the space */
+  static void deallocate_tracked(void* arg_alloc_ptr);
+  /**\brief  Reallocate tracked memory in the space */
+  static void* reallocate_tracked(void* arg_alloc_ptr, size_t arg_alloc_size);
+  static auto get_record(void* alloc_ptr) -> derived_t*;
+  std::string get_label() const;
+  static void print_records(std::ostream& s, MemorySpace const&,
+                            bool detail = false);
+};
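+
+// Sketch of the tracked-allocation lifecycle (illustrative only; HostSpace
+// is just an example memory space):
+//
+//   using common_t = SharedAllocationRecordCommon<Kokkos::HostSpace>;
+//   void* p = common_t::allocate_tracked(Kokkos::HostSpace(), "buffer", 1024);
+//   // ... use the 1024 bytes at p ...
+//   common_t::deallocate_tracked(p);  // decrement; deallocates at count 0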
+
+template <class MemorySpace>
+class HostInaccessibleSharedAllocationRecordCommon
+    : public SharedAllocationRecordCommon<MemorySpace> {
+ private:
+  using base_t        = SharedAllocationRecordCommon<MemorySpace>;
+  using derived_t     = SharedAllocationRecord<MemorySpace, void>;
+  using record_base_t = SharedAllocationRecord<void, void>;
+
+ protected:
+  using base_t::base_t;
+
+ public:
+  static void print_records(std::ostream& s, MemorySpace const&,
+                            bool detail = false);
+  static auto get_record(void* alloc_ptr) -> derived_t*;
+  std::string get_label() const;
+};
+
+namespace {
+
+/* Taking the address of this function so make sure it is unique */
+template <class MemorySpace, class DestroyFunctor>
+void deallocate(SharedAllocationRecord<void, void>* record_ptr) {
+  using base_type = SharedAllocationRecord<MemorySpace, void>;
+  using this_type = SharedAllocationRecord<MemorySpace, DestroyFunctor>;
+
+  this_type* const ptr =
+      static_cast<this_type*>(static_cast<base_type*>(record_ptr));
+
+  ptr->m_destroy.destroy_shared_allocation();
+
+  delete ptr;
+}
+
+}  // namespace
+
+/*
+ *  Memory space specialization of SharedAllocationRecord< Space , void >
+ * requires :
+ *
+ *  SharedAllocationRecord< Space , void > : public SharedAllocationRecord< void
+ * , void >
+ *  {
+ *    // delete allocated user memory via static_cast to this type.
+ *    static void deallocate( const SharedAllocationRecord<void,void> * );
+ *    Space m_space ;
+ *  }
+ */
+template <class MemorySpace, class DestroyFunctor>
+class SharedAllocationRecord
+    : public SharedAllocationRecord<MemorySpace, void> {
+ private:
+  template <typename ExecutionSpace>
+  SharedAllocationRecord(const ExecutionSpace& execution_space,
+                         const MemorySpace& arg_space,
+                         const std::string& arg_label, const size_t arg_alloc)
+      /*  Allocate user memory as [ SharedAllocationHeader , user_memory ] */
+      : SharedAllocationRecord<MemorySpace, void>(
+            execution_space, arg_space, arg_label, arg_alloc,
+            &Kokkos::Impl::deallocate<MemorySpace, DestroyFunctor>),
+        m_destroy() {}
+
+  SharedAllocationRecord(const MemorySpace& arg_space,
+                         const std::string& arg_label, const size_t arg_alloc)
+      /*  Allocate user memory as [ SharedAllocationHeader , user_memory ] */
+      : SharedAllocationRecord<MemorySpace, void>(
+            arg_space, arg_label, arg_alloc,
+            &Kokkos::Impl::deallocate<MemorySpace, DestroyFunctor>),
+        m_destroy() {}
+
+  SharedAllocationRecord()                              = delete;
+  SharedAllocationRecord(const SharedAllocationRecord&) = delete;
+  SharedAllocationRecord& operator=(const SharedAllocationRecord&) = delete;
+
+ public:
+  DestroyFunctor m_destroy;
+
+  // Allocate with a zero use count.  Incrementing the use count from zero to
+  // one inserts the record into the tracking list.  Decrementing the count from
+  // one to zero removes from the tracking list and deallocates.
+  KOKKOS_INLINE_FUNCTION static SharedAllocationRecord* allocate(
+      const MemorySpace& arg_space, const std::string& arg_label,
+      const size_t arg_alloc) {
+    KOKKOS_IF_ON_HOST(
+        (return new SharedAllocationRecord(arg_space, arg_label, arg_alloc);))
+    KOKKOS_IF_ON_DEVICE(
+        ((void)arg_space; (void)arg_label; (void)arg_alloc; return nullptr;))
+  }
+
+  template <typename ExecutionSpace>
+  KOKKOS_INLINE_FUNCTION static SharedAllocationRecord* allocate(
+      const ExecutionSpace& exec_space, const MemorySpace& arg_space,
+      const std::string& arg_label, const size_t arg_alloc) {
+    KOKKOS_IF_ON_HOST(
+        (return new SharedAllocationRecord(exec_space, arg_space, arg_label,
+                                           arg_alloc);))
+    KOKKOS_IF_ON_DEVICE(((void)exec_space; (void)arg_space; (void)arg_label;
+                         (void)arg_alloc; return nullptr;))
+  }
+};
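+
+// A DestroyFunctor only needs a destroy_shared_allocation() member, which
+// deallocate() above invokes before deleting the record. A minimal sketch
+// (illustrative, not an upstream type):
+//
+//   struct DestroyNothing {
+//     void destroy_shared_allocation() {}
+//   };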
+
+template <class MemorySpace>
+class SharedAllocationRecord<MemorySpace, void>
+    : public SharedAllocationRecord<void, void> {};
+
+union SharedAllocationTracker {
+ private:
+  using Record = SharedAllocationRecord<void, void>;
+
+  enum : uintptr_t { DO_NOT_DEREF_FLAG = 0x01ul };
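+
+  // Record objects are heap-allocated and at least pointer-aligned, so the
+  // low bit of a valid Record* is always zero and can safely carry the
+  // "do not dereference / untracked" flag.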
+
+  // The allocation record resides in Host memory space
+  uintptr_t m_record_bits;
+  Record* m_record;
+
+ public:
+  // Use macros instead of inline functions to reduce
+  // pressure on compiler optimization by reducing
+  // number of symbols and inline functions.
+
+#define KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_INCREMENT          \
+  KOKKOS_IF_ON_HOST((if (!(m_record_bits & DO_NOT_DEREF_FLAG)) { \
+    Record::increment(m_record);                                 \
+  }))
+
+#define KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_DECREMENT          \
+  KOKKOS_IF_ON_HOST((if (!(m_record_bits & DO_NOT_DEREF_FLAG)) { \
+    Record::decrement(m_record);                                 \
+  }))
+
+#define KOKKOS_IMPL_SHARED_ALLOCATION_CARRY_RECORD_BITS(rhs,               \
+                                                        override_tracking) \
+  (((!override_tracking) || (rhs.m_record_bits & DO_NOT_DEREF_FLAG) ||     \
+    (!Record::tracking_enabled()))                                         \
+       ? rhs.m_record_bits | DO_NOT_DEREF_FLAG                             \
+       : rhs.m_record_bits)
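+
+  // In words: the record bits are copied verbatim only when tracking is
+  // requested, the source is itself tracked, and tracking is globally
+  // enabled; in every other case DO_NOT_DEREF_FLAG is set so the copy never
+  // increments or decrements the use count.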
+
+  /** \brief  Assign a specialized record */
+  inline void assign_allocated_record_to_uninitialized(Record* arg_record) {
+    if (arg_record) {
+      Record::increment(m_record = arg_record);
+    } else {
+      m_record_bits = DO_NOT_DEREF_FLAG;
+    }
+  }
+
+  template <class MemorySpace>
+  constexpr SharedAllocationRecord<MemorySpace, void>* get_record() const
+      noexcept {
+    return (m_record_bits & DO_NOT_DEREF_FLAG)
+               ? nullptr
+               : static_cast<SharedAllocationRecord<MemorySpace, void>*>(
+                     m_record);
+  }
+
+  template <class MemorySpace>
+  std::string get_label() const {
+    return (m_record_bits == DO_NOT_DEREF_FLAG)
+               ? std::string()
+               : reinterpret_cast<SharedAllocationRecord<MemorySpace, void>*>(
+                     m_record_bits & ~DO_NOT_DEREF_FLAG)
+                     ->get_label();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int use_count() const {
+    KOKKOS_IF_ON_HOST((Record* const tmp = reinterpret_cast<Record*>(
+                           m_record_bits & ~DO_NOT_DEREF_FLAG);
+                       return (tmp ? tmp->use_count() : 0);))
+
+    KOKKOS_IF_ON_DEVICE((return 0;))
+  }
+
+  KOKKOS_INLINE_FUNCTION bool has_record() const {
+    return (m_record_bits & (~DO_NOT_DEREF_FLAG)) != 0;
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  void clear() {
+    // If this is tracking then must decrement
+    KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_DECREMENT
+    // Reset to default constructed value.
+    m_record_bits = DO_NOT_DEREF_FLAG;
+  }
+
+  // Destruction and default construction:
+  KOKKOS_FORCEINLINE_FUNCTION
+  ~SharedAllocationTracker() {
+    KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_DECREMENT
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION constexpr SharedAllocationTracker()
+      : m_record_bits(DO_NOT_DEREF_FLAG) {}
+
+  // Move:
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  SharedAllocationTracker(SharedAllocationTracker&& rhs)
+      : m_record_bits(rhs.m_record_bits) {
+    rhs.m_record_bits = DO_NOT_DEREF_FLAG;
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  SharedAllocationTracker& operator=(SharedAllocationTracker&& rhs) {
+    auto swap_tmp     = m_record_bits;
+    m_record_bits     = rhs.m_record_bits;
+    rhs.m_record_bits = swap_tmp;
+    return *this;
+  }
+
+  // Copy:
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  SharedAllocationTracker(const SharedAllocationTracker& rhs)
+      : m_record_bits(
+            KOKKOS_IMPL_SHARED_ALLOCATION_CARRY_RECORD_BITS(rhs, true)) {
+    KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_INCREMENT
+  }
+
+  /** \brief  Copy construction may disable tracking. */
+  KOKKOS_FORCEINLINE_FUNCTION
+  SharedAllocationTracker(const SharedAllocationTracker& rhs,
+                          const bool enable_tracking)
+      : m_record_bits(KOKKOS_IMPL_SHARED_ALLOCATION_CARRY_RECORD_BITS(
+            rhs, enable_tracking)) {
+    KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_INCREMENT
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  SharedAllocationTracker& operator=(const SharedAllocationTracker& rhs) {
+    // If this is tracking then must decrement
+    KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_DECREMENT
+    m_record_bits = KOKKOS_IMPL_SHARED_ALLOCATION_CARRY_RECORD_BITS(rhs, true);
+    KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_INCREMENT
+    return *this;
+  }
+
+  /*  The following functions (assign_direct and assign_force_disable)
+   *  are the result of deconstructing the
+   *  KOKKOS_IMPL_SHARED_ALLOCATION_CARRY_RECORD_BITS macro.  This
+   *  allows the caller to do the check for tracking enabled and managed
+   *  apart from the assignment of the record because the tracking
+   *  enabled / managed question may be important for other tasks as well
+   */
+
+  /** \brief  Copy assignment without the carry bits logic
+   *         This assumes that externally defined tracking is explicitly enabled
+   */
+  KOKKOS_FORCEINLINE_FUNCTION
+  void assign_direct(const SharedAllocationTracker& rhs) {
+    KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_DECREMENT
+    m_record_bits = rhs.m_record_bits;
+    KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_INCREMENT
+  }
+
+  /** \brief  Copy assignment without the increment
+   *         we cannot assume that current record is unmanaged
+   *         but with externally defined tracking explicitly disabled
+   *         we can go straight to the do not deref flag     */
+  KOKKOS_FORCEINLINE_FUNCTION
+  void assign_force_disable(const SharedAllocationTracker& rhs) {
+    KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_DECREMENT
+    m_record_bits = rhs.m_record_bits | DO_NOT_DEREF_FLAG;
+  }
+
+  // report if record is tracking or not
+  KOKKOS_FORCEINLINE_FUNCTION
+  bool tracking_enabled() { return (!(m_record_bits & DO_NOT_DEREF_FLAG)); }
+
+  /** \brief  Copy assignment may disable tracking */
+  KOKKOS_FORCEINLINE_FUNCTION
+  void assign(const SharedAllocationTracker& rhs, const bool enable_tracking) {
+    KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_DECREMENT
+    m_record_bits =
+        KOKKOS_IMPL_SHARED_ALLOCATION_CARRY_RECORD_BITS(rhs, enable_tracking);
+    KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_INCREMENT
+  }
+
+#undef KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_INCREMENT
+#undef KOKKOS_IMPL_SHARED_ALLOCATION_TRACKER_DECREMENT
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_SharedAlloc_timpl.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_SharedAlloc_timpl.hpp
new file mode 100644 (file)
index 0000000..276217c
--- /dev/null
@@ -0,0 +1,295 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_SHAREDALLOC_TIMPL_HPP
+#define KOKKOS_IMPL_SHAREDALLOC_TIMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Core_fwd.hpp>
+
+#include <impl/Kokkos_SharedAlloc.hpp>
+
+#include <Kokkos_HostSpace.hpp>  // used with HostInaccessible specializations
+
+#include <string>    // std::string
+#include <cstring>   // strncpy
+#include <iostream>  // ostream
+
+namespace Kokkos {
+namespace Impl {
+
+template <class MemorySpace>
+auto SharedAllocationRecordCommon<MemorySpace>::allocate(
+    MemorySpace const& arg_space, std::string const& arg_label,
+    size_t arg_alloc_size) -> derived_t* {
+  return new derived_t(arg_space, arg_label, arg_alloc_size);
+}
+
+template <class MemorySpace>
+void* SharedAllocationRecordCommon<MemorySpace>::allocate_tracked(
+    const MemorySpace& arg_space, const std::string& arg_alloc_label,
+    size_t arg_alloc_size) {
+  if (!arg_alloc_size) return nullptr;
+
+  SharedAllocationRecord* const r =
+      allocate(arg_space, arg_alloc_label, arg_alloc_size);
+
+  record_base_t::increment(r);
+
+  return r->data();
+}
+
+template <class MemorySpace>
+void SharedAllocationRecordCommon<MemorySpace>::deallocate(
+    SharedAllocationRecordCommon::record_base_t* arg_rec) {
+  delete static_cast<derived_t*>(arg_rec);
+}
+
+template <class MemorySpace>
+void SharedAllocationRecordCommon<MemorySpace>::deallocate_tracked(
+    void* arg_alloc_ptr) {
+  if (arg_alloc_ptr != nullptr) {
+    SharedAllocationRecord* const r = derived_t::get_record(arg_alloc_ptr);
+    record_base_t::decrement(r);
+  }
+}
+
+template <class MemorySpace>
+void* SharedAllocationRecordCommon<MemorySpace>::reallocate_tracked(
+    void* arg_alloc_ptr, size_t arg_alloc_size) {
+  derived_t* const r_old = derived_t::get_record(arg_alloc_ptr);
+  derived_t* const r_new =
+      allocate(r_old->m_space, r_old->get_label(), arg_alloc_size);
+
+  Kokkos::Impl::DeepCopy<MemorySpace, MemorySpace>(
+      r_new->data(), r_old->data(), std::min(r_old->size(), r_new->size()));
+  Kokkos::fence(
+      "SharedAllocationRecordCommon::reallocate_tracked(): fence after "
+      "copying data");
+
+  record_base_t::increment(r_new);
+  record_base_t::decrement(r_old);
+
+  return r_new->data();
+}
+
+template <class MemorySpace>
+auto SharedAllocationRecordCommon<MemorySpace>::get_record(void* alloc_ptr)
+    -> derived_t* {
+  using Header = SharedAllocationHeader;
+
+  Header const* const h = alloc_ptr ? Header::get_header(alloc_ptr) : nullptr;
+
+  if (!alloc_ptr || h->m_record->m_alloc_ptr != h) {
+    Kokkos::Impl::throw_runtime_exception(
+        std::string("Kokkos::Impl::SharedAllocationRecordCommon<") +
+        std::string(MemorySpace::name()) +
+        std::string(">::get_record() ERROR"));
+  }
+
+  return static_cast<derived_t*>(h->m_record);
+}
+
+template <class MemorySpace>
+std::string SharedAllocationRecordCommon<MemorySpace>::get_label() const {
+  return record_base_t::m_label;
+}
+
+template <class MemorySpace>
+void SharedAllocationRecordCommon<MemorySpace>::
+    _fill_host_accessible_header_info(SharedAllocationHeader& arg_header,
+                                      std::string const& arg_label) {
+  // Fill in the Header information, directly accessible on the host
+
+  arg_header.m_record = &self();
+
+  strncpy(arg_header.m_label, arg_label.c_str(),
+          SharedAllocationHeader::maximum_label_length);
+  // Set last element zero, in case c_str is too long
+  arg_header.m_label[SharedAllocationHeader::maximum_label_length - 1] = '\0';
+}
+
+template <class MemorySpace>
+void SharedAllocationRecordCommon<MemorySpace>::print_records(
+    std::ostream& s, const MemorySpace&, bool detail) {
+  (void)s;
+  (void)detail;
+#ifdef KOKKOS_ENABLE_DEBUG
+  SharedAllocationRecord<void, void>::print_host_accessible_records(
+      s, MemorySpace::name(), &derived_t::s_root_record, detail);
+#else
+  Kokkos::Impl::throw_runtime_exception(
+      std::string("SharedAllocationHeader<") +
+      std::string(MemorySpace::name()) +
+      std::string(
+          ">::print_records only works with KOKKOS_ENABLE_DEBUG enabled"));
+#endif
+}
+
+template <class MemorySpace>
+void HostInaccessibleSharedAllocationRecordCommon<MemorySpace>::print_records(
+    std::ostream& s, const MemorySpace&, bool detail) {
+  (void)s;
+  (void)detail;
+#ifdef KOKKOS_ENABLE_DEBUG
+  SharedAllocationRecord<void, void>* r = &derived_t::s_root_record;
+
+  char buffer[256];
+
+  SharedAllocationHeader head;
+
+  if (detail) {
+    do {
+      if (r->m_alloc_ptr) {
+        Kokkos::Impl::DeepCopy<HostSpace, MemorySpace>(
+            &head, r->m_alloc_ptr, sizeof(SharedAllocationHeader));
+        Kokkos::fence(
+            "HostInaccessibleSharedAllocationRecordCommon::print_records(): "
+            "fence after copying header to HostSpace");
+      } else {
+        head.m_label[0] = 0;
+      }
+
+      // Formatting dependent on sizeof(uintptr_t)
+      const char* format_string;
+
+      if (sizeof(uintptr_t) == sizeof(unsigned long)) {
+        format_string =
+            "%s addr( 0x%.12lx ) list( 0x%.12lx 0x%.12lx ) extent[ 0x%.12lx "
+            "+ %.8ld ] count(%d) dealloc(0x%.12lx) %s\n";
+      } else if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
+        format_string =
+            "%s addr( 0x%.12llx ) list( 0x%.12llx 0x%.12llx ) extent[ "
+            "0x%.12llx + %.8ld ] count(%d) dealloc(0x%.12llx) %s\n";
+      }
+
+      snprintf(buffer, 256, format_string, MemorySpace::execution_space::name(),
+               reinterpret_cast<uintptr_t>(r),
+               reinterpret_cast<uintptr_t>(r->m_prev),
+               reinterpret_cast<uintptr_t>(r->m_next),
+               reinterpret_cast<uintptr_t>(r->m_alloc_ptr), r->m_alloc_size,
+               r->m_count, reinterpret_cast<uintptr_t>(r->m_dealloc),
+               head.m_label);
+      s << buffer;
+      r = r->m_next;
+    } while (r != &derived_t::s_root_record);
+  } else {
+    do {
+      if (r->m_alloc_ptr) {
+        Kokkos::Impl::DeepCopy<HostSpace, MemorySpace>(
+            &head, r->m_alloc_ptr, sizeof(SharedAllocationHeader));
+        Kokkos::fence(
+            "HostInaccessibleSharedAllocationRecordCommon::print_records(): "
+            "fence after copying header to HostSpace");
+
+        // Formatting dependent on sizeof(uintptr_t)
+        const char* format_string;
+
+        if (sizeof(uintptr_t) == sizeof(unsigned long)) {
+          format_string = "%s [ 0x%.12lx + %ld ] %s\n";
+        } else if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
+          format_string = "%s [ 0x%.12llx + %ld ] %s\n";
+        }
+
+        snprintf(
+            buffer, 256, format_string, MemorySpace::execution_space::name(),
+            reinterpret_cast<uintptr_t>(r->data()), r->size(), head.m_label);
+      } else {
+        snprintf(buffer, 256, "%s [ 0 + 0 ]\n",
+                 MemorySpace::execution_space::name());
+      }
+      s << buffer;
+      r = r->m_next;
+    } while (r != &derived_t::s_root_record);
+  }
+#else
+  Kokkos::Impl::throw_runtime_exception(
+      std::string("SharedAllocationHeader<") +
+      std::string(MemorySpace::name()) +
+      std::string(
+          ">::print_records only works with KOKKOS_ENABLE_DEBUG enabled"));
+#endif
+}
+
+template <class MemorySpace>
+auto HostInaccessibleSharedAllocationRecordCommon<MemorySpace>::get_record(
+    void* alloc_ptr) -> derived_t* {
+  // Copy the header from the allocation
+  SharedAllocationHeader head;
+
+  SharedAllocationHeader const* const head_cuda =
+      alloc_ptr ? SharedAllocationHeader::get_header(alloc_ptr) : nullptr;
+
+  if (alloc_ptr) {
+    typename MemorySpace::execution_space exec_space;
+    Kokkos::Impl::DeepCopy<HostSpace, MemorySpace, decltype(exec_space)>(
+        exec_space, &head, head_cuda, sizeof(SharedAllocationHeader));
+    exec_space.fence(
+        "HostInaccessibleSharedAllocationRecordCommon::get_record(): fence "
+        "after copying header to HostSpace");
+  }
+
+  derived_t* const record =
+      alloc_ptr ? static_cast<derived_t*>(head.m_record) : nullptr;
+
+  if (!alloc_ptr || record->m_alloc_ptr != head_cuda) {
+    Kokkos::Impl::throw_runtime_exception(
+        std::string("Kokkos::Impl::SharedAllocationRecord<") +
+        std::string(MemorySpace::name()) +
+        std::string(", void>::get_record ERROR"));
+  }
+
+  return record;
+}
+
+template <class MemorySpace>
+std::string
+HostInaccessibleSharedAllocationRecordCommon<MemorySpace>::get_label() const {
+  return record_base_t::m_label;
+}
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_IMPL_SHAREDALLOC_TIMPL_HPP
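
These tracked-allocation helpers are what the public Kokkos::kokkos_malloc /
kokkos_realloc / kokkos_free entry points ultimately call through the
SharedAllocationRecord specializations. A minimal usage sketch of that public
interface (the label and sizes are arbitrary):

    #include <Kokkos_Core.hpp>

    int main(int argc, char* argv[]) {
      Kokkos::initialize(argc, argv);
      {
        // Each call routes through allocate_tracked() /
        // reallocate_tracked() / deallocate_tracked() for the chosen space.
        void* p = Kokkos::kokkos_malloc<Kokkos::HostSpace>("demo_label", 256);
        p       = Kokkos::kokkos_realloc<Kokkos::HostSpace>(p, 512);
        Kokkos::kokkos_free<Kokkos::HostSpace>(p);
      }
      Kokkos::finalize();
      return 0;
    }
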
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_SimpleTaskScheduler.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_SimpleTaskScheduler.hpp
new file mode 100644 (file)
index 0000000..06bfe27
--- /dev/null
@@ -0,0 +1,494 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SIMPLETASKSCHEDULER_HPP
+#define KOKKOS_SIMPLETASKSCHEDULER_HPP
+
+//----------------------------------------------------------------------------
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_TaskScheduler_fwd.hpp>
+//----------------------------------------------------------------------------
+
+#include <Kokkos_MemoryPool.hpp>
+
+#include <Kokkos_Future.hpp>
+#include <impl/Kokkos_TaskQueue.hpp>
+#include <impl/Kokkos_SingleTaskQueue.hpp>
+#include <impl/Kokkos_MultipleTaskQueue.hpp>
+#include <impl/Kokkos_TaskQueueMultiple.hpp>
+#include <impl/Kokkos_TaskPolicyData.hpp>
+#include <impl/Kokkos_TaskTeamMember.hpp>
+#include <impl/Kokkos_EBO.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+
+// TODO @tasking @cleanup move this
+template <class T>
+struct DefaultDestroy {
+  T* managed_object;
+  KOKKOS_FUNCTION
+  void destroy_shared_allocation() { managed_object->~T(); }
+};
+
+}  // namespace Impl
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class ExecSpace, class QueueType>
+// requires ExecutionSpace<ExecSpace> && TaskQueue<QueueType>
+class SimpleTaskScheduler
+    : public Impl::TaskSchedulerBase,
+      private Impl::ExecutionSpaceInstanceStorage<ExecSpace>,
+      private Impl::MemorySpaceInstanceStorage<
+          typename QueueType::memory_space>,
+      private Impl::NoUniqueAddressMemberEmulation<
+          typename QueueType::team_scheduler_info_type> {
+ public:
+  // TODO @tasking @generalization (maybe?) don't force QueueType to be complete
+  // here
+
+  using scheduler_type  = SimpleTaskScheduler;  // tag as scheduler concept
+  using execution_space = ExecSpace;
+  using task_queue_type = QueueType;
+  using memory_space    = typename task_queue_type::memory_space;
+  using memory_pool     = typename task_queue_type::memory_pool;
+
+  using team_scheduler_info_type =
+      typename task_queue_type::team_scheduler_info_type;
+  using task_scheduling_info_type =
+      typename task_queue_type::task_scheduling_info_type;
+  using specialization = Impl::TaskQueueSpecialization<SimpleTaskScheduler>;
+  using member_type    = typename specialization::member_type;
+
+  template <class Functor>
+  using runnable_task_type =
+      typename QueueType::template runnable_task_type<Functor,
+                                                      SimpleTaskScheduler>;
+
+  using task_base_type = typename task_queue_type::task_base_type;
+  using runnable_task_base_type =
+      typename task_queue_type::runnable_task_base_type;
+
+  using task_queue_traits = typename QueueType::task_queue_traits;
+
+  template <class ValueType>
+  using future_type = Kokkos::BasicFuture<ValueType, SimpleTaskScheduler>;
+  template <class FunctorType>
+  using future_type_for_functor = future_type<typename FunctorType::value_type>;
+
+ private:
+  template <typename, typename>
+  friend class BasicFuture;
+
+  using track_type = Kokkos::Impl::SharedAllocationTracker;
+  using execution_space_storage =
+      Impl::ExecutionSpaceInstanceStorage<execution_space>;
+  using memory_space_storage = Impl::MemorySpaceInstanceStorage<memory_space>;
+  using team_scheduler_info_storage =
+      Impl::NoUniqueAddressMemberEmulation<team_scheduler_info_type>;
+
+  track_type m_track;
+  task_queue_type* m_queue = nullptr;
+
+  KOKKOS_INLINE_FUNCTION
+  static constexpr task_base_type* _get_task_ptr(std::nullptr_t) {
+    return nullptr;
+  }
+
+  template <class ValueType>
+  KOKKOS_INLINE_FUNCTION static constexpr task_base_type* _get_task_ptr(
+      future_type<ValueType>&& f) {
+    return f.m_task;
+  }
+
+  template <int TaskEnum, class DepTaskType, class FunctorType>
+  KOKKOS_FUNCTION future_type_for_functor<std::decay_t<FunctorType>>
+  _spawn_impl(
+      DepTaskType arg_predecessor_task, TaskPriority arg_priority,
+      typename runnable_task_base_type::function_type apply_function_ptr,
+      typename runnable_task_base_type::destroy_type /*destroy_function_ptr*/,
+      FunctorType&& functor) {
+    KOKKOS_EXPECTS(m_queue != nullptr);
+
+    using functor_future_type =
+        future_type_for_functor<std::decay_t<FunctorType>>;
+    using task_type =
+        typename task_queue_type::template runnable_task_type<FunctorType,
+                                                              scheduler_type>;
+
+    // Reference count starts at two:
+    //   +1 for the matching decrement when task is complete
+    //   +1 for the future
+    auto& runnable_task = *m_queue->template allocate_and_construct<task_type>(
+        /* functor = */ std::forward<FunctorType>(functor),
+        /* apply_function_ptr = */ apply_function_ptr,
+        /* task_type = */ static_cast<Impl::TaskType>(TaskEnum),
+        /* priority = */ arg_priority,
+        /* queue_base = */ m_queue,
+        /* initial_reference_count = */ 2);
+
+    if (arg_predecessor_task != nullptr) {
+      m_queue->initialize_scheduling_info_from_predecessor(
+          runnable_task, *arg_predecessor_task);
+      runnable_task.set_predecessor(*arg_predecessor_task);
+      arg_predecessor_task->decrement_and_check_reference_count();
+    } else {
+      m_queue->initialize_scheduling_info_from_team_scheduler_info(
+          runnable_task, team_scheduler_info());
+    }
+
+    auto rv = functor_future_type(&runnable_task);
+
+    Kokkos::memory_fence();  // fence to ensure dependent stores are visible
+
+    m_queue->schedule_runnable(std::move(runnable_task), team_scheduler_info());
+    // note that the task may already be complete at this point, so don't
+    // touch it again
+
+    return rv;
+  }
+
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Constructors, destructor, and assignment"> {{{2
+
+  SimpleTaskScheduler() = default;
+
+  explicit SimpleTaskScheduler(execution_space const& arg_execution_space,
+                               memory_space const& arg_memory_space,
+                               memory_pool const& arg_memory_pool)
+      : execution_space_storage(arg_execution_space),
+        memory_space_storage(arg_memory_space) {
+    // Ask the task queue how much space it needs (usually will just be
+    // sizeof(task_queue_type), but some queues may need additional storage
+    // dependent on runtime conditions or properties of the execution space)
+    auto const allocation_size = task_queue_type::task_queue_allocation_size(
+        arg_execution_space, arg_memory_space, arg_memory_pool);
+
+    // TODO @tasking @generalization DSH better encapsulation of the
+    // SharedAllocationRecord pattern
+    using record_type =
+        Impl::SharedAllocationRecord<memory_space,
+                                     Impl::DefaultDestroy<task_queue_type>>;
+
+    // Allocate space for the task queue
+    auto* record = record_type::allocate(memory_space(), "Kokkos::TaskQueue",
+                                         allocation_size);
+    m_queue      = new (record->data())
+        task_queue_type(arg_execution_space, arg_memory_space, arg_memory_pool);
+    record->m_destroy.managed_object = m_queue;
+    m_track.assign_allocated_record_to_uninitialized(record);
+  }
+
+  explicit SimpleTaskScheduler(execution_space const& arg_execution_space,
+                               memory_pool const& pool)
+      : SimpleTaskScheduler(arg_execution_space, memory_space{},
+                            pool) { /* forwarding ctor, must be empty */
+  }
+
+  explicit SimpleTaskScheduler(memory_pool const& pool)
+      : SimpleTaskScheduler(execution_space{}, memory_space{},
+                            pool) { /* forwarding ctor, must be empty */
+  }
+
+  SimpleTaskScheduler(memory_space const& arg_memory_space,
+                      size_t const mempool_capacity,
+                      unsigned const mempool_min_block_size,  // = 1u << 6
+                      unsigned const mempool_max_block_size,  // = 1u << 10
+                      unsigned const mempool_superblock_size  // = 1u << 12
+                      )
+      : SimpleTaskScheduler(
+            execution_space{}, arg_memory_space,
+            memory_pool(
+                arg_memory_space, mempool_capacity, mempool_min_block_size,
+                mempool_max_block_size,
+                mempool_superblock_size)) { /* forwarding ctor, must be empty */
+  }
+
+  // </editor-fold> end Constructors, destructor, and assignment }}}2
+  //----------------------------------------------------------------------------
+
+  // Note that this is an expression of shallow constness
+  KOKKOS_INLINE_FUNCTION
+  task_queue_type& queue() const {
+    KOKKOS_EXPECTS(m_queue != nullptr);
+    return *m_queue;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  SimpleTaskScheduler get_team_scheduler(int rank_in_league) const noexcept {
+    KOKKOS_EXPECTS(m_queue != nullptr);
+    auto rv = SimpleTaskScheduler{*this};
+    rv.team_scheduler_info() =
+        m_queue->initial_team_scheduler_info(rank_in_league);
+    return rv;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  execution_space const& get_execution_space() const {
+    return this->execution_space_instance();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  team_scheduler_info_type& team_scheduler_info() & {
+    return this->team_scheduler_info_storage::no_unique_address_data_member();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  team_scheduler_info_type const& team_scheduler_info() const& {
+    return this->team_scheduler_info_storage::no_unique_address_data_member();
+  }
+
+  //----------------------------------------------------------------------------
+
+  template <int TaskEnum, typename DepFutureType, typename FunctorType>
+  KOKKOS_FUNCTION static Kokkos::BasicFuture<typename FunctorType::value_type,
+                                             scheduler_type>
+  spawn(Impl::TaskPolicyWithScheduler<TaskEnum, scheduler_type, DepFutureType>&&
+            arg_policy,
+        typename runnable_task_base_type::function_type arg_function,
+        typename runnable_task_base_type::destroy_type arg_destroy,
+        FunctorType&& arg_functor) {
+    return std::move(arg_policy.scheduler())
+        .template _spawn_impl<TaskEnum>(
+            _get_task_ptr(std::move(arg_policy.predecessor())),
+            arg_policy.priority(), arg_function, arg_destroy,
+            std::forward<FunctorType>(arg_functor));
+  }
+
+  template <int TaskEnum, typename DepFutureType, typename FunctorType>
+  KOKKOS_FUNCTION Kokkos::BasicFuture<typename FunctorType::value_type,
+                                      scheduler_type>
+  spawn(Impl::TaskPolicyWithPredecessor<TaskEnum, DepFutureType>&& arg_policy,
+        FunctorType&& arg_functor) {
+    static_assert(std::is_same<typename DepFutureType::scheduler_type,
+                               scheduler_type>::value,
+                  "Can't create a task policy from a scheduler and a future "
+                  "from a different scheduler");
+
+    using task_type = runnable_task_type<FunctorType>;
+    typename task_type::function_type const ptr = task_type::apply;
+    typename task_type::destroy_type const dtor = task_type::destroy;
+
+    return _spawn_impl<TaskEnum>(std::move(arg_policy).predecessor().m_task,
+                                 arg_policy.priority(), ptr, dtor,
+                                 std::forward<FunctorType>(arg_functor));
+  }
+
+  template <class FunctorType, class ValueType, class Scheduler>
+  KOKKOS_FUNCTION static void respawn(
+      FunctorType* functor,
+      BasicFuture<ValueType, Scheduler> const& predecessor,
+      TaskPriority priority = TaskPriority::Regular) {
+    using task_type =
+        typename task_queue_type::template runnable_task_type<FunctorType,
+                                                              scheduler_type>;
+
+    auto& task = *static_cast<task_type*>(functor);
+
+    KOKKOS_EXPECTS(!task.get_respawn_flag());
+
+    task.set_priority(priority);
+    task.set_predecessor(*predecessor.m_task);
+    task.set_respawn_flag(true);
+  }
+
+  template <class FunctorType>
+  KOKKOS_FUNCTION static void respawn(
+      FunctorType* functor, scheduler_type const&,
+      TaskPriority priority = TaskPriority::Regular) {
+    using task_type =
+        typename task_queue_type::template runnable_task_type<FunctorType,
+                                                              scheduler_type>;
+
+    auto& task = *static_cast<task_type*>(functor);
+
+    KOKKOS_EXPECTS(!task.get_respawn_flag());
+
+    task.set_priority(priority);
+    KOKKOS_ASSERT(!task.has_predecessor());
+    task.set_respawn_flag(true);
+  }
+
+  template <class ValueType>
+  KOKKOS_FUNCTION future_type<void> when_all(
+      BasicFuture<ValueType, scheduler_type> const predecessors[],
+      int n_predecessors) {
+    // TODO @tasking @generalization DSH propagate scheduling info
+
+    using task_type = typename task_queue_type::aggregate_task_type;
+
+    future_type<void> rv;
+
+    if (n_predecessors > 0) {
+      task_queue_type* queue_ptr = nullptr;
+
+      // Loop over the predecessors to find the queue and increment the
+      // reference counts
+      for (int i_pred = 0; i_pred < n_predecessors; ++i_pred) {
+        auto* predecessor_task_ptr = predecessors[i_pred].m_task;
+
+        if (predecessor_task_ptr != nullptr) {
+          // TODO @tasking @cleanup DSH figure out when this is allowed to be
+          // nullptr (if at all anymore)
+
+          // Increment reference count to track subsequent assignment.
+          // TODO @tasking @optimization DSH figure out if this reference count
+          // increment is necessary
+          predecessor_task_ptr->increment_reference_count();
+
+          // TODO @tasking @cleanup DSH we should just set a boolean here
+          // instead to make this more readable
+          queue_ptr = m_queue;
+        }
+
+      }  // end loop over predecessors
+
+      // This only represents a non-ready future if at least one of the
+      // predecessors has a task (and thus, a queue)
+      if (queue_ptr != nullptr) {
+        auto& q = *queue_ptr;
+
+        auto* aggregate_task_ptr =
+            q.template allocate_and_construct_with_vla_emulation<
+                task_type, task_base_type*>(
+                /* n_vla_entries = */ n_predecessors,
+                /* aggregate_predecessor_count = */ n_predecessors,
+                /* queue_base = */ &q,
+                /* initial_reference_count = */ 2);
+
+        rv = future_type<void>(aggregate_task_ptr);
+
+        for (int i_pred = 0; i_pred < n_predecessors; ++i_pred) {
+          aggregate_task_ptr->vla_value_at(i_pred) =
+              predecessors[i_pred].m_task;
+        }
+
+        Kokkos::memory_fence();  // we're touching very questionable memory, so
+                                 // be sure to fence
+
+        q.schedule_aggregate(std::move(*aggregate_task_ptr),
+                             team_scheduler_info());
+        // the aggregate may be processed at any time, so don't touch it after
+        // this
+      }
+    }
+
+    return rv;
+  }
+
+  template <class F>
+  KOKKOS_FUNCTION future_type<void> when_all(int n_calls, F&& func) {
+    // TODO @tasking @generalization DSH propagate scheduling info?
+
+    // later this should be std::invoke_result_t
+    using generated_type = decltype(func(0));
+    using task_type      = typename task_queue_type::aggregate_task_type;
+
+    static_assert(is_future<generated_type>::value,
+                  "when_all function must return a Kokkos future (an instance "
+                  "of Kokkos::BasicFuture)");
+    static_assert(
+        std::is_base_of<scheduler_type,
+                        typename generated_type::scheduler_type>::value,
+        "when_all function must return a Kokkos::BasicFuture of a compatible "
+        "scheduler type");
+
+    auto* aggregate_task =
+        m_queue->template allocate_and_construct_with_vla_emulation<
+            task_type, task_base_type*>(
+            /* n_vla_entries = */ n_calls,
+            /* aggregate_predecessor_count = */ n_calls,
+            /* queue_base = */ m_queue,
+            /* initial_reference_count = */ 2);
+
+    auto rv = future_type<void>(aggregate_task);
+
+    for (int i_call = 0; i_call < n_calls; ++i_call) {
+      auto generated_future = func(i_call);
+
+      if (generated_future.m_task != nullptr) {
+        generated_future.m_task->increment_reference_count();
+        aggregate_task->vla_value_at(i_call) = generated_future.m_task;
+
+        KOKKOS_ASSERT(m_queue ==
+                          generated_future.m_task->ready_queue_base_ptr() &&
+                      "Queue mismatch in when_all");
+      }
+    }
+
+    Kokkos::memory_fence();
+
+    m_queue->schedule_aggregate(std::move(*aggregate_task),
+                                team_scheduler_info());
+    // This could complete at any moment, so don't touch anything after this
+
+    return rv;
+  }
+};
+
+template <class ExecSpace, class QueueType>
+inline void wait(SimpleTaskScheduler<ExecSpace, QueueType> const& scheduler) {
+  using scheduler_type = SimpleTaskScheduler<ExecSpace, QueueType>;
+  scheduler_type::specialization::execute(scheduler);
+}
+
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_SIMPLETASKSCHEDULER_HPP */
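
The scheduler above is normally driven through the tasking front end
(task_spawn / wait) rather than used directly. A hedged sketch of that
pattern, assuming a TaskDAG-enabled build and a scheduler alias from this
family; the functor, pool sizes, and names are illustrative only:

    #include <Kokkos_Core.hpp>

    #ifdef KOKKOS_ENABLE_TASKDAG
    struct AnswerTask {
      using value_type = int;
      template <class TeamMember>
      KOKKOS_INLINE_FUNCTION void operator()(TeamMember&, int& result) {
        result = 42;
      }
    };

    // Call between Kokkos::initialize() and Kokkos::finalize().
    void run_tasks() {
      using scheduler_type =
          Kokkos::TaskScheduler<Kokkos::DefaultExecutionSpace>;
      using memory_space = typename scheduler_type::memory_space;
      // Pool sizes are arbitrary; they feed the mempool-forwarding ctor.
      scheduler_type scheduler(memory_space{}, /*capacity=*/1 << 20,
                               /*min_block=*/1u << 6, /*max_block=*/1u << 10,
                               /*superblock=*/1u << 12);
      auto fut = Kokkos::task_spawn(Kokkos::TaskSingle(scheduler),
                                    AnswerTask{});
      Kokkos::wait(scheduler);  // drives specialization::execute()
      // fut.get() == 42 once wait() returns
    }
    #endif
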
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_SingleTaskQueue.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_SingleTaskQueue.hpp
new file mode 100644 (file)
index 0000000..aa84fbb
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_SINGLETASKQUEUE_HPP
+#define KOKKOS_IMPL_SINGLETASKQUEUE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_TaskScheduler_fwd.hpp>
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_MemoryPool.hpp>
+
+#include <impl/Kokkos_TaskBase.hpp>
+#include <impl/Kokkos_TaskResult.hpp>
+
+#include <impl/Kokkos_TaskQueueMemoryManager.hpp>
+#include <impl/Kokkos_TaskQueueCommon.hpp>
+#include <Kokkos_Atomic.hpp>
+#include <impl/Kokkos_OptionalRef.hpp>
+#include <impl/Kokkos_LIFO.hpp>
+
+#include <string>
+#include <typeinfo>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class ExecSpace, class MemorySpace, class TaskQueueTraits,
+          class MemoryPool>
+class SingleTaskQueue
+    : public TaskQueueMemoryManager<ExecSpace, MemorySpace, MemoryPool>,
+      public TaskQueueCommonMixin<SingleTaskQueue<
+          ExecSpace, MemorySpace, TaskQueueTraits, MemoryPool>> {
+ private:
+  using base_t = TaskQueueMemoryManager<ExecSpace, MemorySpace, MemoryPool>;
+  using common_mixin_t = TaskQueueCommonMixin<SingleTaskQueue>;
+
+  struct EmptyTeamSchedulerInfo {};
+  struct EmptyTaskSchedulingInfo {};
+
+ public:
+  using task_queue_type   = SingleTaskQueue;  // mark as task_queue concept
+  using task_queue_traits = TaskQueueTraits;
+  using task_base_type    = TaskNode<TaskQueueTraits>;
+  using ready_queue_type =
+      typename TaskQueueTraits::template ready_queue_type<task_base_type>;
+
+  using team_scheduler_info_type  = EmptyTeamSchedulerInfo;
+  using task_scheduling_info_type = EmptyTaskSchedulingInfo;
+
+  using runnable_task_base_type = RunnableTaskBase<TaskQueueTraits>;
+
+  template <class Functor, class Scheduler>
+  // requires TaskScheduler<Scheduler> && TaskFunctor<Functor>
+  using runnable_task_type =
+      RunnableTask<task_queue_traits, Scheduler, typename Functor::value_type,
+                   Functor>;
+
+  using aggregate_task_type =
+      AggregateTask<task_queue_traits, task_scheduling_info_type>;
+
+  // Number of allowed priorities
+  static constexpr int NumQueue = 3;
+
+ private:
+  ready_queue_type m_ready_queues[NumQueue][2];
+
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Constructors, destructors, and assignment"> {{{2
+
+  SingleTaskQueue()                       = delete;
+  SingleTaskQueue(SingleTaskQueue const&) = delete;
+  SingleTaskQueue(SingleTaskQueue&&)      = delete;
+  SingleTaskQueue& operator=(SingleTaskQueue const&) = delete;
+  SingleTaskQueue& operator=(SingleTaskQueue&&) = delete;
+
+  explicit SingleTaskQueue(typename base_t::execution_space const&,
+                           typename base_t::memory_space const&,
+                           typename base_t::memory_pool const& arg_memory_pool)
+      : base_t(arg_memory_pool) {}
+
+  ~SingleTaskQueue() {
+    for (int i_priority = 0; i_priority < NumQueue; ++i_priority) {
+      KOKKOS_EXPECTS(m_ready_queues[i_priority][TaskTeam].empty());
+      KOKKOS_EXPECTS(m_ready_queues[i_priority][TaskSingle].empty());
+    }
+  }
+
+  // </editor-fold> end Constructors, destructors, and assignment }}}2
+  //----------------------------------------------------------------------------
+
+  KOKKOS_FUNCTION
+  void schedule_runnable(runnable_task_base_type&& task,
+                         team_scheduler_info_type const& info) {
+    this->schedule_runnable_to_queue(
+        std::move(task),
+        m_ready_queues[int(task.get_priority())][int(task.get_task_type())],
+        info);
+    // Task may be enqueued and may be run at any point; don't touch it (hence
+    // the use of move semantics)
+  }
+
+  KOKKOS_FUNCTION
+  OptionalRef<task_base_type> pop_ready_task(
+      team_scheduler_info_type const& /*info*/) {
+    OptionalRef<task_base_type> return_value;
+    // always loop in order of priority first, then prefer team tasks over
+    // single tasks
+    for (int i_priority = 0; i_priority < NumQueue; ++i_priority) {
+      // Check for a team task with this priority
+      return_value = m_ready_queues[i_priority][TaskTeam].pop();
+      if (return_value) return return_value;
+
+      // Check for a single task with this priority
+      return_value = m_ready_queues[i_priority][TaskSingle].pop();
+      if (return_value) return return_value;
+    }
+    // if nothing was found, return a default-constructed (empty) OptionalRef
+    return return_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr team_scheduler_info_type initial_team_scheduler_info(int) const
+      noexcept {
+    return {};
+  }
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_SINGLETASKQUEUE_HPP */
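
A standalone sketch (plain C++, not Kokkos API) of the scan order that
pop_ready_task() implements above: highest priority first, and within each
priority, team tasks before single tasks. The lane names are taken from the
code; the loop itself is illustrative:

    #include <cstdio>

    int main() {
      constexpr int NumQueue   = 3;  // number of priorities, as above
      const char* type_name[2] = {"TaskTeam", "TaskSingle"};
      for (int priority = 0; priority < NumQueue; ++priority)
        for (int type = 0; type < 2; ++type)
          std::printf("try m_ready_queues[%d][%s].pop()\n", priority,
                      type_name[type]);
      return 0;
    }
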
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Spinwait.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Spinwait.cpp
new file mode 100644 (file)
index 0000000..d095429
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include <Kokkos_Macros.hpp>
+
+#include <Kokkos_Atomic.hpp>
+#include <impl/Kokkos_Spinwait.hpp>
+#include <impl/Kokkos_BitOps.hpp>
+
+#include <thread>
+#if defined(_WIN32)
+#include <process.h>
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace Impl {
+
+void host_thread_yield(const uint32_t i, const WaitMode mode) {
+  static constexpr uint32_t sleep_limit = 1 << 13;
+  static constexpr uint32_t yield_limit = 1 << 12;
+
+  const int c = int_log2(i);
+
+  if (WaitMode::ROOT != mode) {
+    if (sleep_limit < i) {
+      // Attempt to put the thread to sleep for 'c' microseconds
+      std::this_thread::yield();
+      std::this_thread::sleep_for(std::chrono::microseconds(c));
+    }
+
+    else if (mode == WaitMode::PASSIVE || yield_limit < i) {
+      // Attempt to yield thread resources to runtime
+      std::this_thread::yield();
+    }
+#if defined(KOKKOS_ENABLE_ASM)
+
+    else if ((1u << 4) < i) {
+
+      // Insert a few no-ops to quiet the thread:
+
+      for (int k = 0; k < c; ++k) {
+#if defined(__amd64) || defined(__amd64__) || defined(__x86_64) || \
+    defined(__x86_64__)
+#if !defined(_WIN32) /* IS NOT Microsoft Windows */
+        asm volatile("nop\n");
+#else
+        __asm__ __volatile__("nop\n");
+#endif
+#elif defined(__PPC64__)
+        asm volatile("nop\n");
+#endif
+      }
+    }
+#endif /* defined( KOKKOS_ENABLE_ASM ) */
+  }
+#if defined(KOKKOS_ENABLE_ASM)
+  else if ((1u << 3) < i) {
+    // no-ops for root thread
+    for (int k = 0; k < c; ++k) {
+#if defined(__amd64) || defined(__amd64__) || defined(__x86_64) || \
+    defined(__x86_64__)
+#if !defined(_WIN32) /* IS NOT Microsoft Windows */
+      asm volatile("nop\n");
+#else
+      __asm__ __volatile__("nop\n");
+#endif
+#elif defined(__PPC64__)
+      asm volatile("nop\n");
+#endif
+    }
+  }
+
+  {
+    // Insert memory pause
+#if defined(__amd64) || defined(__amd64__) || defined(__x86_64) || \
+    defined(__x86_64__)
+#if !defined(_WIN32) /* IS NOT Microsoft Windows */
+    asm volatile("pause\n" ::: "memory");
+#else
+    __asm__ __volatile__("pause\n" ::: "memory");
+#endif
+#elif defined(__PPC64__)
+    asm volatile("or 27, 27, 27" ::: "memory");
+#endif
+  }
+
+#endif /* defined( KOKKOS_ENABLE_ASM ) */
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
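
For non-ROOT callers in ACTIVE mode, host_thread_yield() escalates from busy
spinning through yielding to sleeping as the iteration count grows (PASSIVE
mode yields right away). A standalone sketch of that schedule, with the
thresholds copied from the implementation above:

    #include <cstdint>
    #include <cstdio>

    int main() {
      constexpr uint32_t yield_limit = 1 << 12;
      constexpr uint32_t sleep_limit = 1 << 13;
      const uint32_t samples[] = {1u, 100u, yield_limit + 1, sleep_limit + 1};
      for (uint32_t i : samples) {
        const char* action = (i > sleep_limit)   ? "sleep ~log2(i) us"
                             : (i > yield_limit) ? "std::this_thread::yield()"
                                                 : "pause/nop spin";
        std::printf("iteration %u -> %s\n", i, action);
      }
      return 0;
    }
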
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Spinwait.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Spinwait.hpp
new file mode 100644 (file)
index 0000000..0851575
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SPINWAIT_HPP
+#define KOKKOS_SPINWAIT_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Atomic.hpp>
+
+#include <cstdint>
+
+#include <type_traits>
+
+namespace Kokkos {
+namespace Impl {
+
+enum class WaitMode : int {
+  ACTIVE  // Used for tight loops to keep threads active the longest
+  ,
+  PASSIVE  // Used to quickly yield the thread to quiet down the system
+  ,
+  ROOT  // Never sleep or yield the root thread
+};
+
+void host_thread_yield(const uint32_t i, const WaitMode mode);
+
+template <typename T>
+std::enable_if_t<std::is_integral<T>::value, void> root_spinwait_while_equal(
+    T const volatile& flag, const T value) {
+  Kokkos::store_fence();
+  uint32_t i = 0;
+  while (value == flag) {
+    host_thread_yield(++i, WaitMode::ROOT);
+  }
+  Kokkos::load_fence();
+}
+
+template <typename T>
+std::enable_if_t<std::is_integral<T>::value, void> root_spinwait_until_equal(
+    T const volatile& flag, const T value) {
+  Kokkos::store_fence();
+  uint32_t i = 0;
+  while (value != flag) {
+    host_thread_yield(++i, WaitMode::ROOT);
+  }
+  Kokkos::load_fence();
+}
+
+template <typename T>
+std::enable_if_t<std::is_integral<T>::value, void> spinwait_while_equal(
+    T const volatile& flag, const T value) {
+  Kokkos::store_fence();
+  uint32_t i = 0;
+  while (value == flag) {
+    host_thread_yield(++i, WaitMode::ACTIVE);
+  }
+  Kokkos::load_fence();
+}
+
+template <typename T>
+std::enable_if_t<std::is_integral<T>::value, void> yield_while_equal(
+    T const volatile& flag, const T value) {
+  Kokkos::store_fence();
+  uint32_t i = 0;
+  while (value == flag) {
+    host_thread_yield(++i, WaitMode::PASSIVE);
+  }
+  Kokkos::load_fence();
+}
+
+template <typename T>
+std::enable_if_t<std::is_integral<T>::value, void> spinwait_until_equal(
+    T const volatile& flag, const T value) {
+  Kokkos::store_fence();
+  uint32_t i = 0;
+  while (value != flag) {
+    host_thread_yield(++i, WaitMode::ACTIVE);
+  }
+  Kokkos::load_fence();
+}
+
+template <typename T>
+std::enable_if_t<std::is_integral<T>::value, void> yield_until_equal(
+    T const volatile& flag, const T value) {
+  Kokkos::store_fence();
+  uint32_t i = 0;
+  while (value != flag) {
+    host_thread_yield(++i, WaitMode::PASSIVE);
+  }
+  Kokkos::load_fence();
+}
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+#endif /* #ifndef KOKKOS_SPINWAIT_HPP */
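
A hedged usage sketch of the flag-polling helpers above (this is internal
Impl API, shown only to illustrate the pattern; it assumes the Kokkos include
paths and library are available):

    #include <impl/Kokkos_Spinwait.hpp>

    #include <thread>

    int main() {
      volatile int flag = 0;
      std::thread setter([&flag] { flag = 1; });
      // Polls with escalating backoff until another thread stores 1.
      Kokkos::Impl::spinwait_until_equal(flag, 1);
      setter.join();
      return 0;
    }
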
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Stacktrace.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Stacktrace.cpp
new file mode 100644 (file)
index 0000000..e1f59c1
--- /dev/null
@@ -0,0 +1,252 @@
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#include "Kokkos_Macros.hpp"
+#include "Kokkos_Stacktrace.hpp"
+
+#ifdef KOKKOS_IMPL_ENABLE_STACKTRACE
+// backtrace() function for retrieving the stacktrace
+#include <execinfo.h>
+#endif
+#ifdef KOKKOS_IMPL_ENABLE_CXXABI
+#include <cxxabi.h>
+#endif  // KOKKOS_IMPL_ENABLE_CXXABI
+
+#include <exception>
+#include <iostream>
+#include <tuple>
+#include <vector>
+
+namespace Kokkos {
+namespace Impl {
+#ifndef KOKKOS_IMPL_ENABLE_STACKTRACE
+int backtrace(void**, int) { return 0; }
+char** backtrace_symbols(void* const*, int) { return nullptr; }
+#endif
+
+std::string demangle(const std::string& name) {
+#ifndef KOKKOS_IMPL_ENABLE_CXXABI
+  return name;
+#else
+  size_t found_end = name.find_first_of("+)", 0, 2);
+  if (found_end == std::string::npos) {
+    found_end = name.size();
+  }
+  size_t found_parenthesis = name.find_first_of("(");
+  size_t start             = found_parenthesis + 1;
+  if (found_parenthesis == std::string::npos) start = 0;
+
+  std::string s = name.substr(start, found_end - start);
+
+  if (s.length() != 0) {
+    int status          = 0;
+    char* output_buffer = nullptr;
+    size_t length       = s.length();
+    char* d = abi::__cxa_demangle(s.c_str(), output_buffer, &length, &status);
+    if (d != nullptr) {
+      s = d;
+      free(d);
+    }
+  }
+
+  // Special cases for "main" and "start" on Mac
+  if (s.length() == 0) {
+    if (name == "main" || name == "start") {
+      s = name;
+    }
+  }
+  return s;
+#endif  // KOKKOS_IMPL_ENABLE_CXXABI
+}
+
+class Stacktrace {
+ public:
+  Stacktrace()                  = delete;
+  Stacktrace(const Stacktrace&) = delete;
+  Stacktrace& operator=(const Stacktrace&) = delete;
+  Stacktrace(Stacktrace&&)                 = delete;
+  Stacktrace& operator=(Stacktrace&&) = delete;
+  ~Stacktrace()                       = delete;
+
+  // These are public only to avoid wasting an extra stacktrace line.
+  // See save_stacktrace below.
+  static constexpr int capacity = 100;
+  static void* buffer[capacity];
+  static int length;
+
+  static std::vector<std::string> lines() {
+    char** symbols = backtrace_symbols(buffer, length);
+    if (symbols == nullptr) {
+      return {};
+    } else {
+      std::vector<std::string> trace(length);
+      for (int i = 0; i < length; ++i) {
+        if (symbols[i] != nullptr) {
+          trace[i] = std::string(symbols[i]);
+        }
+      }
+      free(symbols);
+      return trace;
+    }
+  }
+};
+
+int Stacktrace::length = 0;
+void* Stacktrace::buffer[Stacktrace::capacity];
+
+void save_stacktrace() {
+  Stacktrace::length = backtrace(Stacktrace::buffer, Stacktrace::capacity);
+}
+
+size_t find_first_non_whitespace(const std::string& s, const size_t start_pos) {
+  constexpr size_t num_ws_chars = 3;
+  const char ws_chars[]         = "\n\t ";
+  return s.find_first_not_of(ws_chars, start_pos, num_ws_chars);
+}
+
+size_t find_first_whitespace(const std::string& s, const size_t start_pos) {
+  constexpr size_t num_ws_chars = 3;
+  const char ws_chars[]         = "\n\t ";
+  return s.find_first_of(ws_chars, start_pos, num_ws_chars);
+}
+
+template <class Callback>
+void for_each_token(const std::string& s, Callback c) {
+  size_t cur = find_first_non_whitespace(s, 0);
+  while (cur != std::string::npos) {
+    const size_t end   = find_first_whitespace(s, cur);
+    const bool last    = (end == std::string::npos);
+    const size_t count = last ? end : size_t(end - cur);
+    c(s.substr(cur, count), last);
+    cur = find_first_non_whitespace(s, end);
+  }
+}
+
+// Search the whole backtrace, column by column, for "main".
+// This tells us what column has the function names.
+// While we're doing that, figure out the longest column,
+// so we can compute spacing correctly.
+
+struct main_column_info {
+  bool found_main;
+  size_t main_col;
+  std::vector<size_t> main_col_lens;
+};
+
+main_column_info find_main_column(const std::vector<std::string>& traceback) {
+  bool found_main = false;
+  size_t main_col = 0;
+  for (auto&& entry : traceback) {
+    size_t col_count = 0;
+    for_each_token(entry, [&](const std::string& s, bool) {
+      const size_t pos = s.find("main");
+      if (pos != std::string::npos) {
+        found_main = true;
+        main_col   = col_count;
+      }
+      ++col_count;
+    });
+    if (found_main) {
+      break;
+    }
+  }
+
+  // Make another pass to get the column lengths.
+  // Only demangle the column of functions.
+  std::vector<size_t> max_col_lengths;
+  for (auto&& entry : traceback) {
+    size_t col_count = 0;
+    for_each_token(entry, [&](const std::string& s, bool) {
+      const size_t cur_col_len =
+          (found_main && col_count == main_col) ? demangle(s).size() : s.size();
+      ++col_count;
+      if (max_col_lengths.size() < col_count) {
+        max_col_lengths.push_back(cur_col_len);
+      } else {
+        const size_t old_max_len = max_col_lengths[col_count - 1];
+        if (old_max_len < cur_col_len) {
+          max_col_lengths[col_count - 1] = cur_col_len;
+        }
+      }
+    });
+  }
+  return main_column_info{found_main, main_col, max_col_lengths};
+}
+
+void demangle_and_print_traceback_entry(
+    std::ostream& out, const std::string& traceback_entry,
+    const bool found_main, const size_t main_col,
+    const std::vector<size_t>& max_col_lens) {
+  std::vector<std::string> tokens;
+  size_t cur_col = 0;
+  for_each_token(traceback_entry, [&](const std::string& s, bool last) {
+    const size_t old_width(out.width());
+    out.width(max_col_lens[cur_col]);
+    try {
+      if (found_main && cur_col == main_col) {
+        out << demangle(s);
+      } else {
+        out << s;
+      }
+      if (!last) {
+        out << " ";
+      }
+      ++cur_col;
+    } catch (...) {
+      out.width(old_width);
+      throw;
+    }
+    out.width(old_width);
+  });
+}
+
+void demangle_and_print_traceback(std::ostream& out,
+                                  const std::vector<std::string>& traceback) {
+  const auto result = find_main_column(traceback);
+  for (auto&& entry : traceback) {
+    demangle_and_print_traceback_entry(out, entry, result.found_main,
+                                       result.main_col, result.main_col_lens);
+    out << std::endl;
+  }
+}
+
+void print_saved_stacktrace(std::ostream& out) {
+  auto lines = Stacktrace::lines();
+  for (auto&& entry : lines) {
+    out << entry << std::endl;
+  }
+}
+
+void print_demangled_saved_stacktrace(std::ostream& out) {
+  demangle_and_print_traceback(out, Stacktrace::lines());
+}
+
+std::function<void()> user_terminate_handler_post_ = nullptr;
+
+void kokkos_terminate_handler() {
+  using std::cerr;
+  using std::endl;
+
+  cerr << "Kokkos observes that std::terminate has been called.  "
+          "Here is the last saved stack trace.  Note that this does not "
+          "necessarily show what called std::terminate."
+       << endl
+       << endl;
+  print_demangled_saved_stacktrace(std::cerr);
+
+  if (user_terminate_handler_post_ != nullptr) {
+    user_terminate_handler_post_();
+  } else {
+    std::abort();
+  }
+}
+
+void set_kokkos_terminate_handler(std::function<void()> user_post) {
+  user_terminate_handler_post_ = user_post;
+  std::set_terminate(kokkos_terminate_handler);
+}
+
+}  // namespace Impl
+}  // namespace Kokkos
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Stacktrace.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Stacktrace.hpp
new file mode 100644 (file)
index 0000000..b5cf4ee
--- /dev/null
@@ -0,0 +1,45 @@
+#ifndef KOKKOS_STACKTRACE_HPP
+#define KOKKOS_STACKTRACE_HPP
+
+#include <functional>
+#include <ostream>
+#include <string>
+
+namespace Kokkos {
+namespace Impl {
+
+/// \brief Return the demangled version of the input symbol, or the
+///   original input if demangling is not possible.
+std::string demangle(const std::string& name);
+
+/// \brief Save the current stacktrace.
+///
+/// You may only save one stacktrace at a time.  If you call this
+/// twice, the second call will overwrite the result of the first
+/// call.
+void save_stacktrace();
+
+/// \brief Print the raw form of the currently saved stacktrace, if
+///   any, to the given output stream.
+void print_saved_stacktrace(std::ostream& out);
+
+/// \brief Print the currently saved, demangled stacktrace, if any, to
+///   the given output stream.
+///
+/// Demangling is best effort only.
+void print_demangled_saved_stacktrace(std::ostream& out);
+
+/// \brief Set the std::terminate handler so that it prints the
+///   currently saved stack trace, then calls user_post.
+///
+/// This is useful if you want to call, say, MPI_Abort instead of
+/// std::abort.  The MPI Standard frowns upon calling MPI functions
+/// without including their header file, and Kokkos does not depend on
+/// MPI, so there's no way for Kokkos to depend on MPI_Abort in a
+/// portable way.
+void set_kokkos_terminate_handler(std::function<void()> user_post = nullptr);
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_STACKTRACE_HPP
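
A hedged usage sketch of the interface documented above: snapshot a trace,
print it demangled, and install the terminate handler with a custom
post-hook (the hook body is illustrative):

    #include "Kokkos_Stacktrace.hpp"

    #include <iostream>

    int main() {
      Kokkos::Impl::save_stacktrace();  // a later call overwrites this one
      Kokkos::Impl::print_demangled_saved_stacktrace(std::cout);

      // On std::terminate: print the saved trace, then run the hook
      // (e.g. an MPI_Abort wrapper) instead of std::abort().
      Kokkos::Impl::set_kokkos_terminate_handler(
          [] { std::cerr << "custom post-trace hook\n"; });
      return 0;
    }
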
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_StringManipulation.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_StringManipulation.hpp
new file mode 100644 (file)
index 0000000..644dcf7
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_STRING_MANIPULATION_HPP
+#define KOKKOS_STRING_MANIPULATION_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <cstddef>
+#include <type_traits>
+
+namespace Kokkos {
+namespace Impl {
+
+// This header provides a subset of the functionality from <cstring>.  In
+// contrast to the standard library header, functions are usable on the device
+// and in constant expressions.  It also includes functionality from <charconv>
+// to convert an integer value to a character sequence.
+
+//<editor-fold desc="String examination">
+// returns the length of a given string
+KOKKOS_INLINE_FUNCTION constexpr std::size_t strlen(const char *str) {
+  std::size_t i = 0;
+  while (str[i] != '\0') {
+    ++i;
+  }
+  return i;
+}
+
+// compares two strings
+KOKKOS_INLINE_FUNCTION constexpr int strcmp(const char *lhs, const char *rhs) {
+  while (*lhs == *rhs++) {
+    if (*lhs++ == '\0') {
+      return 0;
+    }
+  }
+  return static_cast<unsigned int>(*lhs) -
+         static_cast<unsigned int>(*(rhs - 1));
+}
+
+// compares a certain number of characters from two strings
+KOKKOS_INLINE_FUNCTION constexpr int strncmp(const char *lhs, const char *rhs,
+                                             std::size_t count) {
+  for (std::size_t i = 0; i < count; ++i) {
+    if (lhs[i] != rhs[i]) {
+      return lhs[i] < rhs[i] ? -1 : 1;
+    } else if (lhs[i] == '\0') {
+      return 0;
+    }
+  }
+  return 0;
+}
+//</editor-fold>
+
+//<editor-fold desc="String manipulation">
+// copies one string to another
+KOKKOS_INLINE_FUNCTION constexpr char *strcpy(char *dest, const char *src) {
+  char *d = dest;
+  for (; (*d = *src) != '\0'; ++d, ++src) {
+  }
+  return dest;
+}
+
+// copies a certain amount of characters from one string to another
+KOKKOS_INLINE_FUNCTION constexpr char *strncpy(char *dest, const char *src,
+                                               std::size_t count) {
+  if (count != 0) {
+    char *d = dest;
+    do {
+      if ((*d++ = *src++) == '\0') {
+        while (--count != 0) {
+          *d++ = '\0';
+        }
+        break;
+      }
+    } while (--count != 0);
+  }
+  return dest;
+}
+
+// concatenates two strings
+KOKKOS_INLINE_FUNCTION constexpr char *strcat(char *dest, const char *src) {
+  char *d = dest;
+  for (; *d != '\0'; ++d) {
+  }
+  while ((*d++ = *src++) != '\0') {
+  }
+  return dest;
+}
+
+// concatenates a certain amount of characters of two strings
+KOKKOS_INLINE_FUNCTION constexpr char *strncat(char *dest, const char *src,
+                                               std::size_t count) {
+  if (count != 0) {
+    char *d = dest;
+    for (; *d != '\0'; ++d) {
+    }
+    do {
+      if ((*d = *src++) == '\0') {
+        break;
+      }
+      d++;
+    } while (--count != 0);
+    *d = '\0';
+  }
+  return dest;
+}
+//</editor-fold>
+
+//<editor-fold desc="Character conversions">
+template <class Unsigned>
+KOKKOS_FUNCTION constexpr unsigned int to_chars_len(Unsigned val) {
+  unsigned int const base = 10;
+  static_assert(std::is_integral<Unsigned>::value, "implementation bug");
+  static_assert(std::is_unsigned<Unsigned>::value, "implementation bug");
+  unsigned int n = 1;
+  while (val >= base) {
+    val /= base;
+    ++n;
+  }
+  return n;
+}
+template <class Unsigned>
+KOKKOS_FUNCTION constexpr void to_chars_impl(char *first, unsigned int len,
+                                             Unsigned val) {
+  unsigned int const base = 10;
+  static_assert(std::is_integral<Unsigned>::value, "implementation bug");
+  static_assert(std::is_unsigned<Unsigned>::value, "implementation bug");
+  unsigned int pos = len - 1;
+  while (val > 0) {
+    auto const num = val % base;
+    val /= base;
+    first[pos] = '0' + num;
+    --pos;
+  }
+}
+
+// define values of portable error conditions that correspond to the POSIX error
+// codes
+enum class errc {
+  value_too_large = 75  // equivalent POSIX error is EOVERFLOW
+};
+struct to_chars_result {
+  char *ptr;
+  errc ec;
+};
+
+// converts an integer value to a character sequence
+template <class Integral>
+KOKKOS_FUNCTION constexpr to_chars_result to_chars_i(char *first, char *last,
+                                                     Integral value) {
+  using Unsigned = std::conditional_t<sizeof(Integral) <= sizeof(unsigned int),
+                                      unsigned int, unsigned long long>;
+  Unsigned unsigned_val = value;
+  if (value == 0) {
+    *first = '0';
+    return {first + 1, {}};
+  } else if
+#ifdef KOKKOS_ENABLE_CXX17
+      constexpr
+#endif
+      (std::is_signed<Integral>::value) {
+    if (value < 0) {
+      *first++     = '-';
+      unsigned_val = Unsigned(~value) + Unsigned(1);
+    }
+  }
+  unsigned int const len = to_chars_len(unsigned_val);
+  if (last - first < len) {
+    return {last, errc::value_too_large};
+  }
+  to_chars_impl(first, len, unsigned_val);
+  return {first + len, {}};
+}
+//</editor-fold>
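+
+// Illustration only (not part of the original header): to_chars_i writes the
+// decimal digits into [first, last) without a terminating '\0', mirroring
+// std::to_chars, e.g.
+//
+//   char buf[16] = {};
+//   to_chars_result r = to_chars_i(buf, buf + sizeof(buf), -42);
+//   // buf now holds "-42" and r.ptr == buf + 3; on overflow, r.ec ==
+//   // errc::value_too_large and r.ptr == last.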
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskBase.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskBase.hpp
new file mode 100644 (file)
index 0000000..bb89ab9
--- /dev/null
@@ -0,0 +1,349 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// Experimental unified task-data parallel manycore LDRD
+
+#ifndef KOKKOS_IMPL_TASKBASE_HPP
+#define KOKKOS_IMPL_TASKBASE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_TaskScheduler_fwd.hpp>
+#include <Kokkos_Core_fwd.hpp>
+
+#include <impl/Kokkos_LIFO.hpp>
+
+#include <string>
+#include <typeinfo>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+/** \brief  Base class for task management, access, and execution.
+ *
+ *  Inheritance structure to allow static_cast from the task root type
+ *  and a task's FunctorType.
+ *
+ *    // Enable a functor to access the base class
+ *    // and provide memory for result value.
+ *    TaskBase< Space , ResultType , FunctorType >
+ *      : TaskBase< void , void , void >
+ *      , FunctorType
+ *      { ... };
+ *    This is followed by memory allocated for the result value.
+ *
+ *
+ *  States of a task:
+ *
+ *    Constructing State, NOT IN a linked list
+ *      m_wait == 0
+ *      m_next == 0
+ *
+ *    Scheduling transition : Constructing -> Waiting
+ *      before:
+ *        m_wait == 0
+ *        m_next == this task's initial dependence, 0 if none
+ *      after:
+ *        m_wait == EndTag
+ *        m_next == EndTag
+ *
+ *    Waiting State, IN a linked list
+ *      m_apply != 0
+ *      m_queue != 0
+ *      m_ref_count > 0
+ *      m_wait == head of linked list of tasks waiting on this task
+ *      m_next == next of linked list of tasks
+ *
+ *    transition : Waiting -> Executing
+ *      before:
+ *        m_next == EndTag
+ *      after:
+ *        m_next == LockTag
+ *
+ *    Executing State, NOT IN a linked list
+ *      m_apply != 0
+ *      m_queue != 0
+ *      m_ref_count > 0
+ *      m_wait == head of linked list of tasks waiting on this task
+ *      m_next == LockTag
+ *
+ *    Respawn transition : Executing -> Executing-Respawn
+ *      before:
+ *        m_next == LockTag
+ *      after:
+ *        m_next == this task's updated dependence, 0 if none
+ *
+ *    Executing-Respawn State, NOT IN a linked list
+ *      m_apply != 0
+ *      m_queue != 0
+ *      m_ref_count > 0
+ *      m_wait == head of linked list of tasks waiting on this task
+ *      m_next == this task's updated dependence, 0 if none
+ *
+ *    transition : Executing -> Complete
+ *      before:
+ *        m_wait == head of linked list
+ *      after:
+ *        m_wait == LockTag
+ *
+ *    Complete State, NOT IN a linked list
+ *      m_wait == LockTag: cannot add dependence (<=> complete)
+ *      m_next == LockTag: not a member of a wait queue
+ *
+ */
+class TaskBase {
+ public:
+  enum : int16_t { TaskTeam = 0, TaskSingle = 1, Aggregate = 2 };
+  enum : uintptr_t { LockTag = ~uintptr_t(0), EndTag = ~uintptr_t(1) };
+
+  template <typename, typename>
+  friend class Kokkos::BasicTaskScheduler;
+
+  using queue_type = TaskQueueBase;
+
+  using function_type = void (*)(TaskBase*, void*);
+  using destroy_type  = void (*)(TaskBase*);
+
+  // sizeof(TaskBase) == 48
+
+  function_type m_apply = nullptr;  ///< Apply function pointer
+  queue_type* m_queue   = nullptr;  ///< Pointer to the scheduler
+  TaskBase* m_next      = nullptr;  ///< next in linked list of ready tasks
+  TaskBase* m_wait      = nullptr;  ///< Queue of tasks waiting on this
+  int32_t m_ref_count   = 0;
+  int32_t m_alloc_size  = 0;
+  int32_t m_dep_count;  ///< Aggregate's number of dependences
+  int16_t m_task_type;  ///< Type of task
+  int16_t m_priority;   ///< Priority of runnable task
+
+  TaskBase(TaskBase&&)      = delete;
+  TaskBase(const TaskBase&) = delete;
+  TaskBase& operator=(TaskBase&&) = delete;
+  TaskBase& operator=(const TaskBase&) = delete;
+
+  KOKKOS_DEFAULTED_FUNCTION ~TaskBase() = default;
+
+  KOKKOS_INLINE_FUNCTION constexpr TaskBase()
+      : m_apply(nullptr),
+        m_queue(nullptr),
+        m_next(nullptr),
+        m_wait(nullptr),
+        m_ref_count(0),
+        m_alloc_size(0),
+        m_dep_count(0),
+        m_task_type(0),
+        m_priority(0) {}
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  TaskBase* volatile* aggregate_dependences() volatile {
+    return reinterpret_cast<TaskBase* volatile*>(this + 1);
+  }
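+  // (Illustration, not part of the original source: an Aggregate task with N
+  // dependences is allocated with room for N TaskBase* slots directly after
+  // the TaskBase object itself, so the array begins at `this + 1`; compare
+  // when_all_allocation_size() in Kokkos_TaskQueue.hpp.)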
+
+  KOKKOS_INLINE_FUNCTION
+  bool requested_respawn() {
+    // This should only be called when a task has finished executing and is
+    // in the transition to either the complete or executing-respawn state.
+    TaskBase* const lock = reinterpret_cast<TaskBase*>(LockTag);
+    return lock != m_next;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void add_dependence(TaskBase* dep) {
+    // Precondition: lock == m_next
+
+    auto* const lock = reinterpret_cast<TaskBase*>(LockTag);
+
+    // Assign dependence to m_next.  It will be processed in the subsequent
+    // call to schedule.  Error if the dependence is reset.
+    if (lock != Kokkos::Impl::desul_atomic_exchange(
+                    &m_next, dep, Kokkos::Impl::MemoryOrderSeqCst(),
+                    Kokkos::Impl::MemoryScopeDevice())) {
+      Kokkos::abort("TaskScheduler ERROR: resetting task dependence");
+    }
+    if (nullptr != dep) {
+      // The future may be destroyed upon returning from this call
+      // so increment reference count to track this assignment.
+      Kokkos::Impl::desul_atomic_inc(&(dep->m_ref_count),
+                                     Kokkos::Impl::MemoryOrderSeqCst(),
+                                     Kokkos::Impl::MemoryScopeDevice());
+    }
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  int32_t reference_count() const {
+    return *const_cast<int32_t volatile*>(&m_ref_count);
+  }
+};
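+
+// Illustration only (not part of the original source): per the state diagram
+// above, the LockTag sentinel in m_wait marks completion, so a completion
+// test could be written as
+//
+//   KOKKOS_INLINE_FUNCTION bool is_complete(const TaskBase& t) {
+//     return reinterpret_cast<uintptr_t>(t.m_wait) == TaskBase::LockTag;
+//   }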
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="Verify the size of TaskBase is as expected"> {{{2
+
+// Workaround: some compilers implement int16_t as 4 bytes, so the size might
+// not actually be 48 bytes.
+// There's not a lot of reason to keep checking this here; the program will
+// work fine if this isn't true. I think this check was originally here to
+// emphasize the fact that adding to the size of TaskBase could have a
+// significant performance penalty, since doing so could substantially decrease
+// the number of full task types that fit into a cache line.  We'll leave it
+// here for now, though, since we're probably going to be ripping all of the
+// old TaskBase stuff out eventually anyway.
+constexpr size_t unpadded_task_base_size = 44 + 2 * sizeof(int16_t);
+// don't forget padding:
+constexpr size_t task_base_misalignment =
+    unpadded_task_base_size % alignof(void*);
+constexpr size_t task_base_padding_size =
+    (alignof(void*) - task_base_misalignment) % alignof(void*);
+constexpr size_t expected_task_base_size =
+    unpadded_task_base_size + task_base_padding_size;
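+// For example (not part of the original source): on a typical LP64 target
+// with 8-byte pointers and 2-byte int16_t, unpadded_task_base_size is
+// 44 + 4 == 48, which is already pointer-aligned, so task_base_padding_size
+// is 0 and expected_task_base_size == 48.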
+
+// Produce a more readable compiler error message than the plain static assert
+template <size_t Size>
+struct verify_task_base_size_is_48_note_actual_size_is_ {};
+template <>
+struct verify_task_base_size_is_48_note_actual_size_is_<
+    expected_task_base_size> {
+  using type = int;
+};
+static constexpr
+    typename verify_task_base_size_is_48_note_actual_size_is_<sizeof(
+        TaskBase)>::type verify = {};
+
+static_assert(sizeof(TaskBase) == expected_task_base_size,
+              "Verifying expected sizeof(TaskBase)");
+
+// </editor-fold> end Verify the size of TaskBase is as expected }}}2
+//------------------------------------------------------------------------------
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class Scheduler, typename ResultType, class FunctorType>
+class Task : public TaskBase, public FunctorType {
+ public:
+  Task()            = delete;
+  Task(Task&&)      = delete;
+  Task(const Task&) = delete;
+  Task& operator=(Task&&) = delete;
+  Task& operator=(const Task&) = delete;
+
+  using root_type    = TaskBase;
+  using functor_type = FunctorType;
+  using result_type  = ResultType;
+
+  using specialization = TaskQueueSpecialization<Scheduler>;
+  using member_type    = typename specialization::member_type;
+
+  KOKKOS_INLINE_FUNCTION
+  void apply_functor(member_type* const member, void*) {
+    this->functor_type::operator()(*member);
+  }
+
+  template <typename T>
+  KOKKOS_INLINE_FUNCTION void apply_functor(member_type* const member,
+                                            T* const result) {
+    this->functor_type::operator()(*member, *result);
+  }
+
+  KOKKOS_FUNCTION static void destroy(root_type* root) {
+    TaskResult<result_type>::destroy(root);
+  }
+
+  KOKKOS_FUNCTION static void apply(root_type* root, void* exec) {
+    Task* const task          = static_cast<Task*>(root);
+    member_type* const member = reinterpret_cast<member_type*>(exec);
+    result_type* const result = TaskResult<result_type>::ptr(task);
+
+    // Task may be serial or team.
+    // If team then must synchronize before querying if respawn was requested.
+    // If team then only one thread calls destructor.
+
+    const bool only_one_thread =
+#ifdef __CUDA_ARCH__  // FIXME_CUDA
+        0 == threadIdx.x && 0 == threadIdx.y;
+#else
+        0 == member->team_rank();
+#endif
+
+    task->apply_functor(member, result);
+
+    member->team_barrier();
+
+    if (only_one_thread && !(task->requested_respawn())) {
+      // Did not respawn, destroy the functor to free memory.
+      task->functor_type::~functor_type();
+      // Cannot destroy and deallocate the task until its dependences
+      // have been processed.
+    }
+  }
+
+  // Constructor for runnable task
+  KOKKOS_INLINE_FUNCTION constexpr Task(FunctorType&& arg_functor)
+      : root_type(), functor_type(std::move(arg_functor)) {}
+
+  KOKKOS_INLINE_FUNCTION
+  ~Task() = delete;
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_TASKBASE_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskNode.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskNode.hpp
new file mode 100644 (file)
index 0000000..5e2ebb0
--- /dev/null
@@ -0,0 +1,702 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// Experimental unified task-data parallel manycore LDRD
+
+#ifndef KOKKOS_IMPL_TASKNODE_HPP
+#define KOKKOS_IMPL_TASKNODE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_TaskScheduler_fwd.hpp>
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_PointerOwnership.hpp>
+
+#include <impl/Kokkos_VLAEmulation.hpp>
+#include <impl/Kokkos_LIFO.hpp>
+#include <impl/Kokkos_ChaseLev.hpp>
+#include <impl/Kokkos_EBO.hpp>
+#include <Kokkos_Concepts.hpp>
+
+#include <string>
+#include <typeinfo>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+#ifdef KOKKOS_COMPILER_PGI
+// Bizarrely, an extra jump instruction forces the PGI compiler to not have a
+// bug related to (probably?) empty base optimization and/or aggregate
+// construction.  This must be defined out-of-line to generate a jump
+// instruction.
+void _kokkos_pgi_compiler_bug_workaround();
+#endif
+
+enum TaskType : int16_t {
+  TaskTeam    = 0,
+  TaskSingle  = 1,
+  Aggregate   = 2,
+  TaskSpecial = -1
+};
+
+//==============================================================================
+
+/** Intrusive base class for things allocated with a Kokkos::MemoryPool
+ *
+ *  @warning Memory pools assume that the address of this class is the same
+ *           as the address of the most derived type that was allocated to
+ *           have the given size.  As a consequence, when interacting with
+ *           multiple inheritance, this must always be the first base class
+ *           of any derived class that uses it!
+ *  @todo Consider inverting inheritance structure to avoid this problem?
+ *
+ *  @tparam CountType type of integer used to store the allocation size
+ */
+template <class CountType = int32_t>
+class alignas(void*) PoolAllocatedObjectBase {
+ public:
+  using pool_allocation_size_type = CountType;
+
+ private:
+  pool_allocation_size_type m_alloc_size;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  constexpr explicit PoolAllocatedObjectBase(
+      pool_allocation_size_type allocation_size)
+      : m_alloc_size(allocation_size) {}
+
+  KOKKOS_INLINE_FUNCTION
+  CountType get_allocation_size() const noexcept { return m_alloc_size; }
+};
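+
+// Illustration only (not part of the original source): per the warning above,
+// a pool-allocated type must list this class first among its bases so that
+// the object address coincides with the base address, e.g.
+//
+//   class MyPoolAllocatedThing : public PoolAllocatedObjectBase<int32_t>,
+//                                public SomeOtherBase { /* ... */ };
+//
+// (MyPoolAllocatedThing and SomeOtherBase are hypothetical names.)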
+
+//==============================================================================
+
+// TODO @tasking @cleanup DSH move this?
+template <class CountType = int32_t>
+class ReferenceCountedBase {
+ public:
+  using reference_count_size_type = CountType;
+
+ private:
+  reference_count_size_type m_ref_count = 0;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+#ifndef KOKKOS_COMPILER_PGI
+  constexpr
+#endif
+      explicit ReferenceCountedBase(
+          reference_count_size_type initial_reference_count)
+      : m_ref_count(initial_reference_count) {
+    // This can't be here because it breaks constexpr
+    // KOKKOS_EXPECTS(initial_reference_count > 0);
+#ifdef KOKKOS_COMPILER_PGI
+    Impl::_kokkos_pgi_compiler_bug_workaround();
+#endif
+  }
+
+  /** Decrement the reference count,
+   *  and return true iff this decrement caused
+   *  the reference count to become zero
+   */
+  KOKKOS_INLINE_FUNCTION
+  bool decrement_and_check_reference_count() {
+    // TODO @tasking @memory_order DSH memory order
+    auto old_count = Kokkos::atomic_fetch_add(&m_ref_count, -1);
+    Kokkos::memory_fence();
+
+    KOKKOS_ASSERT(old_count > 0 && "reference count was not greater than zero!");
+
+    return (old_count == 1);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void increment_reference_count() {
+    Kokkos::Impl::desul_atomic_inc(&m_ref_count,
+                                   Kokkos::Impl::MemoryOrderSeqCst(),
+                                   Kokkos::Impl::MemoryScopeDevice());
+  }
+};
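+
+// Illustration only (not part of the original source): the typical release
+// pattern deallocates only when the decrement observes the last reference,
+// e.g. (pool is a hypothetical memory pool)
+//
+//   if (node->decrement_and_check_reference_count()) {
+//     // this caller saw the count reach zero; safe to reclaim the node
+//     pool.deallocate(node, node->get_allocation_size());
+//   }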
+
+template <class TaskQueueTraits, class SchedulingInfo>
+class AggregateTask;
+
+template <class TaskQueueTraits>
+class RunnableTaskBase;
+
+//==============================================================================
+
+template <class TaskQueueTraits>
+class TaskNode
+    : public PoolAllocatedObjectBase<int32_t>,  // size 4, must be first!
+      public ReferenceCountedBase<int32_t>,     // size 4
+      public TaskQueueTraits::template intrusive_task_base_type<
+          TaskNode<TaskQueueTraits>>  // size 8+
+{
+ public:
+  using priority_type = int16_t;
+
+ private:
+  using task_base_type              = TaskNode<TaskQueueTraits>;
+  using pool_allocated_base_type    = PoolAllocatedObjectBase<int32_t>;
+  using reference_counted_base_type = ReferenceCountedBase<int32_t>;
+  using task_queue_traits           = TaskQueueTraits;
+  using waiting_queue_type =
+      typename task_queue_traits::template waiting_queue_type<TaskNode>;
+
+  waiting_queue_type m_wait_queue;  // size 8+
+
+  // TODO @tasking @cleanup DSH eliminate this, or make its purpose a bit more
+  // clear.  It's only used in BasicFuture, and only for deallocation purposes
+  TaskQueueBase* m_ready_queue_base;
+
+  TaskType m_task_type;      // size 2
+  priority_type m_priority;  // size 2
+  bool m_is_respawning = false;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  constexpr TaskNode(TaskType task_type, TaskPriority priority,
+                     TaskQueueBase* queue_base,
+                     reference_count_size_type initial_reference_count,
+                     pool_allocation_size_type allocation_size)
+      : pool_allocated_base_type(
+            /* allocation_size = */ allocation_size),
+        reference_counted_base_type(
+            /* initial_reference_count = */ initial_reference_count),
+        m_wait_queue(),
+        m_ready_queue_base(queue_base),
+        m_task_type(task_type),
+        m_priority(static_cast<priority_type>(priority)),
+        m_is_respawning(false) {}
+
+  TaskNode()                = delete;
+  TaskNode(TaskNode const&) = delete;
+  TaskNode(TaskNode&&)      = delete;
+  TaskNode& operator=(TaskNode const&) = delete;
+  TaskNode& operator=(TaskNode&&) = delete;
+
+  KOKKOS_INLINE_FUNCTION
+  bool is_aggregate() const noexcept {
+    return m_task_type == TaskType::Aggregate;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool is_runnable() const noexcept {
+    return m_task_type != TaskType::Aggregate;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool is_runnable() const volatile noexcept {
+    return m_task_type != TaskType::Aggregate;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool is_single_runnable() const noexcept {
+    return m_task_type == TaskType::TaskSingle;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool is_team_runnable() const noexcept {
+    return m_task_type == TaskType::TaskTeam;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  TaskType get_task_type() const noexcept { return m_task_type; }
+
+  KOKKOS_INLINE_FUNCTION
+  RunnableTaskBase<TaskQueueTraits>& as_runnable_task() & {
+    KOKKOS_EXPECTS(this->is_runnable());
+    return static_cast<RunnableTaskBase<TaskQueueTraits>&>(*this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  RunnableTaskBase<TaskQueueTraits> const& as_runnable_task() const& {
+    KOKKOS_EXPECTS(this->is_runnable());
+    return static_cast<RunnableTaskBase<TaskQueueTraits> const&>(*this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  RunnableTaskBase<TaskQueueTraits> volatile& as_runnable_task() volatile& {
+    KOKKOS_EXPECTS(this->is_runnable());
+    return static_cast<RunnableTaskBase<TaskQueueTraits> volatile&>(*this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  RunnableTaskBase<TaskQueueTraits> const volatile& as_runnable_task() const
+      volatile& {
+    KOKKOS_EXPECTS(this->is_runnable());
+    return static_cast<RunnableTaskBase<TaskQueueTraits> const volatile&>(
+        *this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  RunnableTaskBase<TaskQueueTraits>&& as_runnable_task() && {
+    KOKKOS_EXPECTS(this->is_runnable());
+    return static_cast<RunnableTaskBase<TaskQueueTraits>&&>(*this);
+  }
+
+  template <class SchedulingInfo>
+  KOKKOS_INLINE_FUNCTION AggregateTask<TaskQueueTraits, SchedulingInfo>&
+  as_aggregate() & {
+    KOKKOS_EXPECTS(this->is_aggregate());
+    return static_cast<AggregateTask<TaskQueueTraits, SchedulingInfo>&>(*this);
+  }
+
+  template <class SchedulingInfo>
+  KOKKOS_INLINE_FUNCTION AggregateTask<TaskQueueTraits, SchedulingInfo> const&
+  as_aggregate() const& {
+    KOKKOS_EXPECTS(this->is_aggregate());
+    return static_cast<AggregateTask<TaskQueueTraits, SchedulingInfo> const&>(
+        *this);
+  }
+
+  template <class SchedulingInfo>
+  KOKKOS_INLINE_FUNCTION AggregateTask<TaskQueueTraits, SchedulingInfo>&&
+  as_aggregate() && {
+    KOKKOS_EXPECTS(this->is_aggregate());
+    return static_cast<AggregateTask<TaskQueueTraits, SchedulingInfo>&&>(*this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool try_add_waiting(task_base_type& depends_on_this) {
+    return m_wait_queue.try_push(depends_on_this);
+  }
+
+  template <class Function>
+  KOKKOS_INLINE_FUNCTION void consume_wait_queue(Function&& f) {
+    KOKKOS_EXPECTS(!m_wait_queue.is_consumed());
+    m_wait_queue.consume(std::forward<Function>(f));
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool wait_queue_is_consumed() const noexcept {
+    // TODO @tasking @memory_order DSH memory order
+    return m_wait_queue.is_consumed();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  TaskQueueBase* ready_queue_base_ptr() const noexcept {
+    return m_ready_queue_base;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void set_priority(TaskPriority priority) noexcept {
+    KOKKOS_EXPECTS(!this->is_enqueued());
+    m_priority = (priority_type)priority;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void set_priority(TaskPriority priority) volatile noexcept {
+    KOKKOS_EXPECTS(!this->is_enqueued());
+    m_priority = (priority_type)priority;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  TaskPriority get_priority() const noexcept {
+    return (TaskPriority)m_priority;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  bool get_respawn_flag() const { return m_is_respawning; }
+
+  KOKKOS_INLINE_FUNCTION
+  void set_respawn_flag(bool value = true) { m_is_respawning = value; }
+
+  KOKKOS_INLINE_FUNCTION
+  void set_respawn_flag(bool value = true) volatile { m_is_respawning = value; }
+};
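+
+// Illustration only (not part of the original source): consumers typically
+// branch on the dynamic kind before downcasting, e.g. (MySchedulingInfo is a
+// hypothetical scheduling-info type)
+//
+//   if (node.is_runnable()) {
+//     auto& task = node.as_runnable_task();
+//     /* schedule the runnable task ... */
+//   } else {
+//     auto& agg = node.template as_aggregate<MySchedulingInfo>();
+//     /* process the aggregate's dependences ... */
+//   }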
+
+//==============================================================================
+
+template <class BaseClass, class SchedulingInfo>
+class SchedulingInfoStorage;
+
+//==============================================================================
+
+template <class BaseType, class SchedulingInfo>
+class SchedulingInfoStorage
+    : public BaseType,  // must be first base class for allocation reasons!!!
+      private NoUniqueAddressMemberEmulation<SchedulingInfo> {
+ private:
+  using base_t                    = BaseType;
+  using task_scheduling_info_type = SchedulingInfo;
+
+ public:
+  // Can't just do "using base_t::base_t" because of clang + CUDA compiler issues
+  template <class... Args>
+  // requires std::is_constructible_v<base_t, Args&&...>
+  KOKKOS_INLINE_FUNCTION constexpr explicit SchedulingInfoStorage(
+      Args&&... args)
+      : base_t(std::forward<Args>(args)...) {}
+
+  KOKKOS_INLINE_FUNCTION
+  task_scheduling_info_type& scheduling_info() & {
+    return this->no_unique_address_data_member();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  task_scheduling_info_type const& scheduling_info() const& {
+    return this->no_unique_address_data_member();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  task_scheduling_info_type&& scheduling_info() && {
+    return std::move(*this).no_unique_address_data_member();
+  }
+};
+
+//==============================================================================
+
+template <class TaskQueueTraits, class SchedulingInfo>
+class alignas(16) AggregateTask final
+    : public SchedulingInfoStorage<TaskNode<TaskQueueTraits>,
+                                   SchedulingInfo>,  // must be first base class
+                                                     // for allocation
+                                                     // reasons!!!
+      public ObjectWithVLAEmulation<
+          AggregateTask<TaskQueueTraits, SchedulingInfo>,
+          OwningRawPtr<TaskNode<TaskQueueTraits>>> {
+ private:
+  using base_t =
+      SchedulingInfoStorage<TaskNode<TaskQueueTraits>, SchedulingInfo>;
+  using vla_base_t =
+      ObjectWithVLAEmulation<AggregateTask<TaskQueueTraits, SchedulingInfo>,
+                             OwningRawPtr<TaskNode<TaskQueueTraits>>>;
+
+  using task_base_type = TaskNode<TaskQueueTraits>;
+
+ public:
+  using aggregate_task_type = AggregateTask;  // concept marker
+
+  template <class... Args>
+  // requires std::is_constructible_v<base_t, Args&&...>
+  KOKKOS_INLINE_FUNCTION constexpr explicit AggregateTask(
+      int32_t aggregate_predecessor_count, Args&&... args)
+      : base_t(TaskType::Aggregate,
+               TaskPriority::Regular,  // all aggregates are regular priority
+               std::forward<Args>(args)...),
+        vla_base_t(aggregate_predecessor_count) {}
+
+  KOKKOS_INLINE_FUNCTION
+  int32_t dependence_count() const { return this->n_vla_entries(); }
+};
+
+// KOKKOS_IMPL_IS_CONCEPT(aggregate_task);
+
+//==============================================================================
+
+template <class TaskQueueTraits>
+class RunnableTaskBase
+    : public TaskNode<TaskQueueTraits>  // must be first base class for
+                                        // allocation reasons!!!
+{
+ private:
+  using base_t = TaskNode<TaskQueueTraits>;
+
+ public:
+  using task_base_type     = TaskNode<TaskQueueTraits>;
+  using function_type      = void (*)(task_base_type*, void*);
+  using destroy_type       = void (*)(task_base_type*);
+  using runnable_task_type = RunnableTaskBase;
+
+ private:
+  function_type m_apply;
+  task_base_type* m_predecessor = nullptr;
+
+ public:
+  template <class... Args>
+  // requires std::is_constructible_v<base_t, Args&&...>
+  KOKKOS_INLINE_FUNCTION constexpr explicit RunnableTaskBase(
+      function_type apply_function_ptr, Args&&... args)
+      : base_t(std::forward<Args>(args)...), m_apply(apply_function_ptr) {}
+
+  KOKKOS_INLINE_FUNCTION
+  bool has_predecessor() const { return m_predecessor != nullptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  void clear_predecessor() { m_predecessor = nullptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  void clear_predecessor() volatile { m_predecessor = nullptr; }
+
+  template <class SchedulingInfo>
+  KOKKOS_INLINE_FUNCTION SchedulingInfo& scheduling_info_as() {
+    using info_storage_type =
+        SchedulingInfoStorage<RunnableTaskBase, SchedulingInfo>;
+
+    return static_cast<info_storage_type*>(this)->scheduling_info();
+  }
+
+  template <class SchedulingInfo>
+  KOKKOS_INLINE_FUNCTION SchedulingInfo const& scheduling_info_as() const {
+    using info_storage_type =
+        SchedulingInfoStorage<RunnableTaskBase, SchedulingInfo>;
+
+    return static_cast<info_storage_type const*>(this)->scheduling_info();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  task_base_type& get_predecessor() const {
+    KOKKOS_EXPECTS(m_predecessor != nullptr);
+    return *m_predecessor;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void set_predecessor(task_base_type& predecessor) {
+    KOKKOS_EXPECTS(m_predecessor == nullptr);
+    // Increment the reference count so that predecessor doesn't go away
+    // before this task is enqueued.
+    // (should be memory order acquire)
+    predecessor.increment_reference_count();
+    m_predecessor = &predecessor;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void acquire_predecessor_from(runnable_task_type& other) {
+    KOKKOS_EXPECTS(m_predecessor == nullptr ||
+                   other.m_predecessor == m_predecessor);
+    // since we're transferring, no need to modify the reference count
+    m_predecessor       = other.m_predecessor;
+    other.m_predecessor = nullptr;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void acquire_predecessor_from(runnable_task_type& other) volatile {
+    KOKKOS_EXPECTS(m_predecessor == nullptr ||
+                   other.m_predecessor == m_predecessor);
+    // since we're transferring, no need to modify the reference count
+    m_predecessor       = other.m_predecessor;
+    other.m_predecessor = nullptr;
+  }
+
+  template <class TeamMember>
+  KOKKOS_INLINE_FUNCTION void run(TeamMember& member) {
+    (*m_apply)(this, &member);
+  }
+};
+
+// KOKKOS_IMPL_IS_CONCEPT(runnable_task);
+
+//==============================================================================
+
+template <class ResultType, class Base>
+class TaskResultStorage : public Base {
+ private:
+  using base_t = Base;
+
+  alignas(Base) ResultType m_value = ResultType{};
+
+ public:
+  // using base_t::base_t;
+  // Can't just do "using base_t::base_t" because of clang + CUDA compiler issues
+  template <class... Args>
+  // requires std::is_constructible_v<base_t, Args&&...>
+  KOKKOS_INLINE_FUNCTION constexpr explicit TaskResultStorage(Args&&... args)
+      : base_t(std::forward<Args>(args)...) {}
+
+  KOKKOS_INLINE_FUNCTION
+  ResultType* value_pointer() {
+    // Over-alignment makes this a non-standard-layout class,
+    // so alignas() doesn't work
+    // static_assert(
+    //  offsetof(TaskResultStorage, m_value) == sizeof(Base),
+    //  "TaskResultStorage must be POD for layout purposes"
+    //);
+    return &m_value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  ResultType& value_reference() { return m_value; }
+};
+
+// TODO @tasking @optimization DSH optimization for empty types (in addition to
+// void)
+template <class Base>
+class TaskResultStorage<void, Base> : public Base {
+ private:
+  using base_t = Base;
+
+ public:
+  // using base_t::base_t;
+  // Can't just do "using base_t::base_t" because of clang + CUDA compiler issues
+  template <class... Args>
+  // requires std::is_constructible_v<base_t, Args&&...>
+  KOKKOS_INLINE_FUNCTION constexpr explicit TaskResultStorage(Args&&... args)
+      : base_t(std::forward<Args>(args)...) {}
+
+  KOKKOS_INLINE_FUNCTION
+  void* value_pointer() noexcept { return nullptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  void value_reference() noexcept {}
+};
+
+//==============================================================================
+
+template <class TaskQueueTraits, class Scheduler, class ResultType,
+          class FunctorType>
+class alignas(16) RunnableTask
+    :  // using nesting of base classes to control layout; multiple empty base
+       // classes may not be ABI compatible with CUDA on Windows
+       public TaskResultStorage<
+           ResultType,
+           SchedulingInfoStorage<RunnableTaskBase<TaskQueueTraits>,
+                                 typename Scheduler::task_queue_type::
+                                     task_scheduling_info_type>>,  // must be
+                                                                   // first base
+                                                                   // class
+       public FunctorType {
+ private:
+  using base_t = TaskResultStorage<
+      ResultType,
+      SchedulingInfoStorage<
+          RunnableTaskBase<TaskQueueTraits>,
+          typename Scheduler::task_queue_type::task_scheduling_info_type>>;
+
+  using runnable_task_base_type = RunnableTaskBase<TaskQueueTraits>;
+  using scheduler_type          = Scheduler;
+  using scheduling_info_type =
+      typename scheduler_type::task_scheduling_info_type;
+  using scheduling_info_storage_base = base_t;
+
+  using task_base_type = TaskNode<TaskQueueTraits>;
+  using specialization = TaskQueueSpecialization<scheduler_type>;
+  using member_type    = typename specialization::member_type;
+  using result_type    = ResultType;
+  using functor_type   = FunctorType;
+
+ public:
+  template <class... Args>
+  // requires std::is_constructible_v<base_t, Args&&...>
+  KOKKOS_INLINE_FUNCTION constexpr explicit RunnableTask(FunctorType&& functor,
+                                                         Args&&... args)
+      : base_t(std::forward<Args>(args)...), functor_type(std::move(functor)) {}
+
+  KOKKOS_INLINE_FUNCTION
+  ~RunnableTask() = delete;
+
+  KOKKOS_INLINE_FUNCTION
+  void update_scheduling_info(member_type& /*member*/) {
+    // TODO @tasking @generalization DSH call a queue-specific hook here; for
+    // now, this info is already updated elsewhere this->scheduling_info() =
+    // member.scheduler().scheduling_info();
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void apply_functor(member_type* member, void*) {
+    update_scheduling_info(*member);
+    this->functor_type::operator()(*member);
+  }
+
+  template <typename T>
+  KOKKOS_INLINE_FUNCTION void apply_functor(member_type* member, T* val) {
+    update_scheduling_info(*member);
+    this->functor_type::operator()(*member, *val);
+  }
+
+  KOKKOS_FUNCTION static void destroy(task_base_type* /*root*/) {
+    // TaskResult<result_type>::destroy(root);
+  }
+
+  KOKKOS_FUNCTION static void apply(task_base_type* self,
+                                    void* member_as_void) {
+    using task_type = Impl::RunnableTask<TaskQueueTraits, Scheduler, ResultType,
+                                         FunctorType>*;
+    auto* const task   = static_cast<task_type>(self);
+    auto* const member = reinterpret_cast<member_type*>(member_as_void);
+
+    // Now that we're over-aligning the result storage, this isn't a problem any
+    // more
+    // static_assert(std::is_standard_layout<task_type>::value,
+    //  "Tasks must be standard layout"
+    //);
+    // static_assert(std::is_pod<task_type>::value,
+    //  "Tasks must be PODs"
+    //);
+
+    // Task may be serial or team.
+    // If team then must synchronize before querying if respawn was requested.
+    // If team then only one thread calls destructor.
+
+    const bool only_one_thread =
+#ifdef __CUDA_ARCH__  // FIXME_CUDA
+        0 == threadIdx.x && 0 == threadIdx.y;
+#else
+        0 == member->team_rank();
+#endif
+
+    // Ensure that the respawn flag is set to zero
+    self->set_respawn_flag(false);
+
+    // task->apply_functor(member, TaskResult<result_type>::ptr(task));
+    task->apply_functor(member, task->value_pointer());
+
+    member->team_barrier();
+
+    if (only_one_thread && !(task->get_respawn_flag())) {
+      // Did not respawn, destroy the functor to free memory.
+      task->functor_type::~functor_type();
+      // Cannot destroy and deallocate the task until its dependences
+      // have been processed.
+    }
+  }
+};
+
+} /* namespace Impl */
+
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_TASKNODE_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskPolicyData.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskPolicyData.hpp
new file mode 100644 (file)
index 0000000..0911362
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_TASKPOLICYDATA_HPP
+#define KOKKOS_IMPL_TASKPOLICYDATA_HPP
+
+//----------------------------------------------------------------------------
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_TaskScheduler_fwd.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+
+template <int TaskEnum, typename DepFutureType>
+struct TaskPolicyWithPredecessor {
+ private:
+  DepFutureType m_predecessor;
+  Kokkos::TaskPriority m_priority;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  TaskPolicyWithPredecessor(DepFutureType arg_predecessor,
+                            Kokkos::TaskPriority arg_priority)
+      : m_predecessor(std::move(arg_predecessor)), m_priority(arg_priority) {}
+
+  TaskPolicyWithPredecessor() = delete;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskPolicyWithPredecessor(TaskPolicyWithPredecessor const&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskPolicyWithPredecessor(TaskPolicyWithPredecessor&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskPolicyWithPredecessor& operator=(TaskPolicyWithPredecessor const&) =
+      default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskPolicyWithPredecessor& operator=(TaskPolicyWithPredecessor&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  ~TaskPolicyWithPredecessor() = default;
+
+  KOKKOS_INLINE_FUNCTION
+  DepFutureType&& predecessor() && { return std::move(m_predecessor); }
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr TaskPriority priority() const { return m_priority; }
+
+  KOKKOS_INLINE_FUNCTION
+  static constexpr int task_type() noexcept { return TaskEnum; }
+};
+
+// TODO @tasking @cleanup DSH clean this up. Using nullptr_t here is too clever
+template <int TaskEnum, typename Scheduler,
+          typename PredecessorFuture = std::nullptr_t>
+struct TaskPolicyWithScheduler {
+ public:
+  using predecessor_future_type = PredecessorFuture;
+
+ private:
+  Scheduler m_scheduler;
+  Kokkos::TaskPriority m_priority;
+  predecessor_future_type m_predecessor;
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  TaskPolicyWithScheduler(Scheduler arg_scheduler,
+                          Kokkos::TaskPriority arg_priority)
+      : m_scheduler(std::move(arg_scheduler)), m_priority(arg_priority) {}
+
+  KOKKOS_INLINE_FUNCTION
+  TaskPolicyWithScheduler(Scheduler arg_scheduler,
+                          predecessor_future_type arg_predecessor,
+                          Kokkos::TaskPriority arg_priority)
+      : m_scheduler(std::move(arg_scheduler)),
+        m_priority(arg_priority),
+        m_predecessor(std::move(arg_predecessor)) {}
+
+  TaskPolicyWithScheduler() = delete;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskPolicyWithScheduler(TaskPolicyWithScheduler const&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskPolicyWithScheduler(TaskPolicyWithScheduler&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskPolicyWithScheduler& operator=(TaskPolicyWithScheduler const&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskPolicyWithScheduler& operator=(TaskPolicyWithScheduler&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  ~TaskPolicyWithScheduler() = default;
+
+  KOKKOS_INLINE_FUNCTION
+  Scheduler& scheduler() & { return m_scheduler; }
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr TaskPriority priority() const { return m_priority; }
+
+  KOKKOS_INLINE_FUNCTION
+  predecessor_future_type& predecessor() & { return m_predecessor; }
+
+  KOKKOS_INLINE_FUNCTION
+  static constexpr bool has_predecessor() noexcept {
+    return !std::is_same<PredecessorFuture, std::nullptr_t>::value;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static constexpr int task_type() noexcept { return TaskEnum; }
+};
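+
+// Illustration only (not part of the original source): task_type() and
+// has_predecessor() are constexpr, so scheduler code can dispatch on a
+// policy's shape without runtime cost, e.g.
+//
+//   template <class Policy>
+//   KOKKOS_INLINE_FUNCTION void spawn_with(Policy& policy) {
+//     if (Policy::has_predecessor()) { /* hook up policy.predecessor() */ }
+//     else                           { /* ready immediately */ }
+//   }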
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_TASKPOLICYDATA_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueue.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueue.hpp
new file mode 100644 (file)
index 0000000..4f565f0
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// Experimental unified task-data parallel manycore LDRD
+
+#ifndef KOKKOS_IMPL_TASKQUEUE_HPP
+#define KOKKOS_IMPL_TASKQUEUE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_TaskScheduler_fwd.hpp>
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_MemoryPool.hpp>
+
+#include <impl/Kokkos_TaskBase.hpp>
+#include <impl/Kokkos_TaskResult.hpp>
+
+#include <Kokkos_Atomic.hpp>
+#include <impl/Kokkos_OptionalRef.hpp>
+#include <impl/Kokkos_LIFO.hpp>
+
+#include <string>
+#include <typeinfo>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+/** \brief  Manage task allocation, deallocation, and scheduling.
+ *
+ *  Task execution is deferred to the TaskQueueSpecialization.
+ *  All other aspects of task management have shared implementation.
+ */
+template <typename ExecSpace, typename MemorySpace>
+class TaskQueue : public TaskQueueBase {
+ protected:
+  template <class>
+  friend struct TaskQueueSpecialization;
+  template <class, class>
+  friend class TaskQueueSpecializationConstrained;
+  template <class, class>
+  friend class Kokkos::BasicTaskScheduler;
+
+  using execution_space = ExecSpace;
+  using memory_space    = MemorySpace;
+  using device_type     = Kokkos::Device<execution_space, memory_space>;
+  using memory_pool     = Kokkos::MemoryPool<device_type>;
+  using task_root_type  = Kokkos::Impl::TaskBase;
+  using team_queue_type = TaskQueue;
+
+  struct Destroy {
+    TaskQueue* m_queue;
+    void destroy_shared_allocation();
+  };
+
+  //----------------------------------------
+
+  enum : int { NumQueue = 3 };
+
+  // Queue is organized as [ priority ][ type ]
+
+  memory_pool m_memory;
+  task_root_type* volatile m_ready[NumQueue][2];
+  // long                      m_accum_alloc ; // Accumulated number of
+  // allocations
+  int m_count_alloc = 0;  // Current number of allocations
+  int m_max_alloc;        // Maximum number of allocations
+  int m_ready_count;      // Number of ready or executing
+
+  //----------------------------------------
+
+  ~TaskQueue();
+  TaskQueue()                 = delete;
+  TaskQueue(TaskQueue&&)      = delete;
+  TaskQueue(TaskQueue const&) = delete;
+  TaskQueue& operator=(TaskQueue&&) = delete;
+  TaskQueue& operator=(TaskQueue const&) = delete;
+
+  TaskQueue(const memory_pool& arg_memory_pool);
+
+  // Schedule a task
+  //   Precondition:
+  //     task is not executing
+  //     task->m_next is the dependence or zero
+  //   Postcondition:
+  //     task->m_next is linked list membership
+  KOKKOS_FUNCTION void schedule_runnable(task_root_type*);
+  KOKKOS_FUNCTION void schedule_aggregate(task_root_type*);
+
+  // Reschedule a task
+  //   Precondition:
+  //     task is in Executing state
+  //     task->m_next == LockTag
+  //   Postcondition:
+  //     task is in Executing-Respawn state
+  //     task->m_next == 0 (no dependence)
+  KOKKOS_FUNCTION
+  void reschedule(task_root_type*);
+
+  // Complete a task
+  //   Precondition:
+  //     task is not executing
+  //     task->m_next == LockTag  =>  task is complete
+  //     task->m_next != LockTag  =>  task is respawn
+  //   Postcondition:
+  //     task->m_wait == LockTag  =>  task is complete
+  //     task->m_wait != LockTag  =>  task is waiting
+  KOKKOS_FUNCTION
+  void complete(task_root_type*);
+
+  KOKKOS_FUNCTION
+  static bool push_task(task_root_type* volatile* const, task_root_type* const);
+
+  KOKKOS_FUNCTION
+  static task_root_type* pop_ready_task(task_root_type* volatile* const);
+
+  KOKKOS_FUNCTION static void decrement(task_root_type* task);
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  int allocation_count() const noexcept { return m_count_alloc; }
+
+  KOKKOS_INLINE_FUNCTION
+  void initialize_team_queues(int /*pool_size*/) const noexcept {}
+
+  KOKKOS_INLINE_FUNCTION
+  task_root_type* attempt_to_steal_task() const noexcept { return nullptr; }
+
+  KOKKOS_INLINE_FUNCTION
+  team_queue_type& get_team_queue(int /*team_rank*/) { return *this; }
+
+  // void execute() { specialization::execute( this ); }
+
+  template <typename FunctorType>
+  void proc_set_apply(typename task_root_type::function_type* ptr) {
+    using specialization =
+        TaskQueueSpecialization<BasicTaskScheduler<ExecSpace, TaskQueue>>;
+    specialization::template proc_set_apply<FunctorType>(ptr);
+  }
+
+  // Assign task pointer with reference counting of assigned tasks
+  KOKKOS_FUNCTION static void assign(task_root_type** const lhs,
+                                     task_root_type* const rhs) {
+    if (*lhs) decrement(*lhs);
+    if (rhs) {
+      Kokkos::Impl::desul_atomic_inc(&rhs->m_ref_count,
+                                     Kokkos::Impl::MemoryOrderSeqCst(),
+                                     Kokkos::Impl::MemoryScopeDevice());
+    }
+
+    // Force write of *lhs
+
+    *static_cast<task_root_type* volatile*>(lhs) = rhs;
+
+    Kokkos::memory_fence();
+  }
+
+  KOKKOS_FUNCTION
+  size_t allocate_block_size(size_t n);  ///< Actual block size allocated
+
+  KOKKOS_FUNCTION
+  void* allocate(size_t n);  ///< Allocate from the memory pool
+
+  KOKKOS_FUNCTION
+  void deallocate(void* p, size_t n);  ///< Deallocate to the memory pool
+
+  //----------------------------------------
+  /**\brief  Allocation size for a spawned task */
+
+  template <typename FunctorType>
+  KOKKOS_FUNCTION size_t spawn_allocation_size() const {
+    using value_type = typename FunctorType::value_type;
+
+    using task_type = Impl::Task<execution_space, value_type, FunctorType>;
+
+    constexpr size_t task_size = sizeof(task_type);
+
+    return m_memory.allocate_block_size(task_size);
+  }
+
+  /**\brief  Allocation size for a when_all aggregate */
+
+  KOKKOS_FUNCTION
+  size_t when_all_allocation_size(int narg) const {
+    return m_memory.allocate_block_size(sizeof(task_root_type) +
+                                        narg * sizeof(task_root_type*));
+  }
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_TASKQUEUE_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueueCommon.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueueCommon.hpp
new file mode 100644 (file)
index 0000000..82af562
--- /dev/null
@@ -0,0 +1,508 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_TASKQUEUECOMMON_HPP
+#define KOKKOS_IMPL_TASKQUEUECOMMON_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_TaskScheduler_fwd.hpp>
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_MemoryPool.hpp>
+
+#include <impl/Kokkos_TaskNode.hpp>
+#include <impl/Kokkos_TaskResult.hpp>
+
+#include <impl/Kokkos_TaskQueueMemoryManager.hpp>
+#include <Kokkos_Atomic.hpp>
+#include <impl/Kokkos_OptionalRef.hpp>
+#include <impl/Kokkos_LIFO.hpp>
+
+#include <string>
+#include <typeinfo>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+/// @brief CRTP Base class implementing the ready count parts common to most
+/// task queues
+template <class Derived>
+class TaskQueueCommonMixin {
+ private:
+  int32_t m_ready_count = 0;
+
+  // CRTP boilerplate
+  KOKKOS_INLINE_FUNCTION
+  Derived& _self() { return *static_cast<Derived*>(this); }
+
+ public:
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Constructors, destructor, and assignment"> {{{2
+
+  TaskQueueCommonMixin() : m_ready_count(0) {
+    Kokkos::memory_fence();
+    // TODO @tasking @memory_order DSH figure out if I need this store to be
+    // atomic
+  }
+
+  ~TaskQueueCommonMixin() {
+    KOKKOS_EXPECTS((Kokkos::memory_fence(), m_ready_count < 1));
+    KOKKOS_EXPECTS(m_ready_count == 0);
+  }
+
+  // </editor-fold> end Constructors, destructor, and assignment }}}2
+  //----------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Task and queue completion"> {{{2
+
+ private:
+  // This would be more readable with a lambda, but that comes with
+  // all the baggage associated with a lambda (compilation times, bugs with
+  // nvcc, etc.), so we'll use a simple little helper functor here.
+  template <class TaskQueueTraits, class TeamSchedulerInfo>
+  struct _schedule_waiting_tasks_operation {
+    TaskNode<TaskQueueTraits> const& m_predecessor;
+    Derived& m_queue;
+    TeamSchedulerInfo const& m_info;
+    KOKKOS_INLINE_FUNCTION
+    void operator()(TaskNode<TaskQueueTraits>&& task) const noexcept
+    // requires Same<TaskType, Derived::task_base_type>
+    {
+      using task_scheduling_info_type =
+          typename Derived::task_scheduling_info_type;
+      if (task.is_runnable())  // KOKKOS_LIKELY
+      {
+        // TODO @tasking @optimization DSH check this outside of the loop?
+        if (m_predecessor.is_runnable()) {
+          m_queue.update_scheduling_info_from_completed_predecessor(
+              /* ready_task = */ task.as_runnable_task(),
+              /* predecessor = */ m_predecessor.as_runnable_task());
+        } else {
+          KOKKOS_ASSERT(m_predecessor.is_aggregate());
+          m_queue.update_scheduling_info_from_completed_predecessor(
+              /* ready_task = */ task.as_runnable_task(),
+              /* predecessor = */ m_predecessor
+                  .template as_aggregate<task_scheduling_info_type>());
+        }
+        m_queue.schedule_runnable(std::move(task).as_runnable_task(), m_info);
+      } else {
+        // The scheduling info update happens inside of schedule_aggregate
+        m_queue.schedule_aggregate(
+            std::move(task).template as_aggregate<task_scheduling_info_type>(),
+            m_info);
+      }
+    }
+  };
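+
+  // For reference, a rough lambda equivalent of the functor above (not used,
+  // for the reasons given in the comment; names abbreviated, sketch only):
+  //
+  //   task.consume_wait_queue([&](TaskNode<TaskQueueTraits>&& waiting) {
+  //     if (waiting.is_runnable())
+  //       queue.schedule_runnable(std::move(waiting).as_runnable_task(), info);
+  //     else
+  //       queue.schedule_aggregate(
+  //           std::move(waiting).template as_aggregate<scheduling_info_t>(),
+  //           info);
+  //   });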
+
+ protected:
+  template <class TaskQueueTraits, class TeamSchedulerInfo>
+  KOKKOS_FUNCTION void _complete_finished_task(TaskNode<TaskQueueTraits>&& task,
+                                               TeamSchedulerInfo const& info) {
+    task.consume_wait_queue(
+        _schedule_waiting_tasks_operation<TaskQueueTraits, TeamSchedulerInfo>{
+            task, _self(), info});
+    bool should_delete = task.decrement_and_check_reference_count();
+    if (should_delete) {
+      _self().deallocate(std::move(task));
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void _increment_ready_count() {
+    // TODO @tasking @memory_order DSH memory order
+    Kokkos::Impl::desul_atomic_inc(&this->m_ready_count,
+                                   Kokkos::Impl::MemoryOrderSeqCst(),
+                                   Kokkos::Impl::MemoryScopeDevice());
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  void _decrement_ready_count() {
+    // TODO @tasking @memory_order DSH memory order
+    Kokkos::Impl::desul_atomic_dec(&this->m_ready_count,
+                                   Kokkos::Impl::MemoryOrderSeqCst(),
+                                   Kokkos::Impl::MemoryScopeDevice());
+  }
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  bool is_done() const noexcept {
+    // TODO @tasking @memory_order DSH Memory order, instead of volatile
+    return (*(volatile int*)(&m_ready_count)) == 0;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  int32_t ready_count() const noexcept {
+    // TODO @tasking @memory_order DSH Memory order, instead of volatile
+    return (*(volatile int*)(&m_ready_count));
+  }
+
+  template <class TaskQueueTraits, class TeamSchedulerInfo>
+  KOKKOS_FUNCTION void complete(RunnableTaskBase<TaskQueueTraits>&& task,
+                                TeamSchedulerInfo const& info) {
+    if (task.get_respawn_flag()) {
+      _self().schedule_runnable(std::move(task), info);
+    } else {
+      _complete_finished_task(std::move(task), info);
+    }
+    // A runnable task that was popped from a ready queue has finished
+    // executing.  If it was respawned into a ready queue then the ready
+    // count was incremented again, so decrement whether respawned or not.
+    // If finished, all of the
+    // tasks waiting on this have been enqueued (either in the ready queue
+    // or the next waiting queue, in the case of an aggregate), and the
+    // ready count has been incremented for each of those, preventing
+    // quiescence.  Thus, it's safe to decrement the ready count here.
+    // TODO @tasking @memory_order DSH memory order? (probably release)
+    _decrement_ready_count();
+  }
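+
+  // Usage sketch: the respawn branch above is reached when the task body
+  // requested a respawn while running (e.g., via Kokkos::respawn(this, ...),
+  // which sets the respawn flag); the task is then rescheduled rather than
+  // completed, and the unconditional decrement keeps the ready count
+  // balanced in either case.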
+
+  template <class TaskQueueTraits, class SchedulingInfo,
+            class TeamSchedulerInfo>
+  KOKKOS_FUNCTION void complete(
+      AggregateTask<TaskQueueTraits, SchedulingInfo>&& task,
+      TeamSchedulerInfo const& info) {
+    // TODO @tasking DSH old code has a ifndef __HIP_DEVICE_COMPILE__ here;
+    // figure out why
+    _complete_finished_task(std::move(task), info);
+  }
+
+  // </editor-fold> end Task and queue completion }}}2
+  //----------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Scheduling"> {{{2
+
+ public:
+  // This isn't actually generic; the template parameters are just to keep
+  // Derived from having to be complete
+  template <class TaskQueueTraits, class ReadyQueueType,
+            class TeamSchedulerInfo>
+  KOKKOS_INLINE_FUNCTION void schedule_runnable_to_queue(
+      RunnableTaskBase<TaskQueueTraits>&& task, ReadyQueueType& ready_queue,
+      TeamSchedulerInfo const& info) {
+    bool task_is_ready           = true;
+    bool scheduling_info_updated = false;
+
+    // do this before enqueueing and potentially losing exclusive access to task
+    bool task_is_respawning = task.get_respawn_flag();
+
+    // clear the respawn flag, since we're handling the respawn (if any) here.
+    // We must make sure this is written through the cache, since the next
+    // thread to access it might be a Cuda thread from a different thread block.
+    ((RunnableTaskBase<TaskQueueTraits> volatile&)task).set_respawn_flag(false);
+
+    if (task.has_predecessor()) {
+      // save the predecessor into a local variable, then clear it from the
+      // task before adding it to the wait queue of the predecessor
+      // (We have exclusive access to the task's predecessor, so we don't need
+      // to do this atomically)
+      // TODO @tasking @internal_documentation DSH document that we expect
+      // exclusive access to `task` in this function
+      auto& predecessor = task.get_predecessor();
+      // This needs a load/store fence here, technically
+      // making this a release store would also do this
+      ((RunnableTaskBase<TaskQueueTraits> volatile&)task).clear_predecessor();
+
+      // TODO @tasking @memory_order DSH remove this fence in favor of memory
+      // orders
+      Kokkos::memory_fence();  // for now
+
+      // Try to add the task to the predecessor's waiting queue.  If it fails,
+      // the predecessor is already done
+      bool predecessor_not_ready = predecessor.try_add_waiting(task);
+
+      // NOTE: if the predecessor was not ready and the task was enqueued,
+      // we've lost exclusive access and should not touch task again
+
+      // If the predecessor is not done, then task is not ready
+      task_is_ready = !predecessor_not_ready;
+
+      if (task_is_ready && predecessor.is_runnable()) {
+        // this is our last chance to update the scheduling info before
+        // predecessor is potentially deleted
+        _self().update_scheduling_info_from_completed_predecessor(
+            /* ready_task = */ task,
+            /* predecessor = */ predecessor.as_runnable_task());
+        scheduling_info_updated = true;
+      }
+
+      if (task_is_respawning) {
+        // Reference count for predecessor was incremented when
+        // respawn called set_dependency()
+        // so that if predecessor completed prior to the
+        // above try_add_waiting(), predecessor would not be destroyed.
+        // predecessor reference count can now be decremented,
+        // which may deallocate it.
+        bool should_delete = predecessor.decrement_and_check_reference_count();
+        if (should_delete) {
+          // TODO @tasking @cleanup DSH better encapsulation of this!
+          _self().deallocate(std::move(predecessor));
+        }
+      }
+      // Note! predecessor may be destroyed at this point, so don't add anything
+      // here
+    }
+
+    if (scheduling_info_updated) {
+      // We need to go back to the queue itself and see if it wants to schedule
+      // somewhere else
+      _self().schedule_runnable(std::move(task), info);
+    }
+    // Put it in the appropriate ready queue if it's ready
+    else if (task_is_ready) {
+      // Increment the ready count
+      _self()._increment_ready_count();
+      // and enqueue the task
+      // (can't move because the task isn't expired unless the push succeeds)
+      bool push_success = ready_queue.push(task);
+      if (!push_success) {
+        _self().handle_failed_ready_queue_insertion(std::move(task),
+                                                    ready_queue, info);
+      }
+    }
+
+    // Task may be enqueued and may be run at any point; don't touch it (hence
+    // the use of move semantics)
+  }
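+
+  // Decision flow of the routine above, summarized (illustrative):
+  //   1. clear the respawn flag (any respawn is handled right here);
+  //   2. with a predecessor: try_add_waiting() parks the task on the
+  //      predecessor's wait queue; success means the task is not ready,
+  //      failure means the predecessor already completed;
+  //   3. if the scheduling info was updated, re-dispatch through
+  //      schedule_runnable(); otherwise bump the ready count and push into
+  //      ready_queue, falling back to handle_failed_ready_queue_insertion().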
+
+  template <class TaskQueueTraits, class ReadyQueueType,
+            class TeamSchedulerInfo>
+  KOKKOS_INLINE_FUNCTION void handle_failed_ready_queue_insertion(
+      RunnableTaskBase<TaskQueueTraits>&& /*task*/,
+      ReadyQueueType& /*ready_queue*/, TeamSchedulerInfo const& /*info*/) {
+    Kokkos::abort("Unhandled failure of ready task queue insertion!\n");
+  }
+
+  // This isn't actually generic; the template parameters are just to keep
+  // Derived from having to be complete
+  template <class TaskQueueTraits, class SchedulingInfo,
+            class TeamSchedulerInfo>
+  KOKKOS_FUNCTION void schedule_aggregate(
+      AggregateTask<TaskQueueTraits, SchedulingInfo>&& aggregate,
+      TeamSchedulerInfo const& info) {
+    // Because the aggregate is being scheduled, it should not be in any queue
+    KOKKOS_EXPECTS(!aggregate.is_enqueued());
+
+    using task_scheduling_info_type =
+        typename Derived::task_scheduling_info_type;
+    using team_scheduler_info_type = typename Derived::team_scheduler_info_type;
+    static_assert(
+        std::is_same<TeamSchedulerInfo, team_scheduler_info_type>::value,
+        "SchedulingInfo type mismatch!");
+
+    bool incomplete_dependence_found = false;
+
+    for (auto*& predecessor_ptr_ref : aggregate) {
+      // if a previous scheduling operation hasn't already set the predecessor
+      // to nullptr, try to enqueue the aggregate into the predecessor's
+      // waiting queue
+      if (predecessor_ptr_ref != nullptr) {
+        // Swap the pointer onto the stack and set the one in the aggregate VLA
+        // to nullptr before we try to add it to the waiting queue so that some
+        // other thread doesn't also get to here and find the pointer to be
+        // not null (as soon as we try to schedule the aggregate, we
+        // potentially lose exclusive access to it if that enqueueing operation
+        // succeeds).  The swap doesn't need to happen atomically, since we have
+        // exclusive access to aggregate until an insertion succeeds
+        auto* predecessor_ptr = std::move(predecessor_ptr_ref);
+
+        // TODO @tasking @memory_order DSH I think this needs to be a store
+        // release so that it doesn't get reordered after the queue insertion
+        predecessor_ptr_ref = nullptr;
+
+        // TODO @tasking @memory_order DSH remove this fence in favor of memory
+        // orders
+        Kokkos::memory_fence();
+
+        // If adding the aggregate to the waiting queue succeeds, the
+        // predecessor is not complete
+        bool pred_not_ready = predecessor_ptr->try_add_waiting(aggregate);
+
+        // NOTE! At this point it is unsafe to access aggregate (unless the
+        // enqueueing failed, so we can't use move semantics to expire it)
+
+        // we found an incomplete dependence, so we can't make task's successors
+        // ready yet
+        incomplete_dependence_found = pred_not_ready;
+
+        if (!pred_not_ready) {
+          // A predecessor was done, and we didn't enqueue the aggregate
+          // Update the aggregate's scheduling info (we still have exclusive
+          // access to it here)
+          if (predecessor_ptr->is_runnable()) {
+            _self().update_scheduling_info_from_completed_predecessor(
+                aggregate, predecessor_ptr->as_runnable_task());
+          } else {
+            KOKKOS_ASSERT(predecessor_ptr->is_aggregate());
+            _self().update_scheduling_info_from_completed_predecessor(
+                aggregate,
+                (*predecessor_ptr)
+                    .template as_aggregate<task_scheduling_info_type>());
+          }
+        }
+
+        // the reference count for the predecessor was incremented when we put
+        // it into the predecessor list, so decrement it here
+        bool should_delete =
+            predecessor_ptr->decrement_and_check_reference_count();
+        if (should_delete) {
+          // TODO @tasking @cleanup DSH better encapsulation of this!
+          _self().deallocate(std::move(*predecessor_ptr));
+        }
+
+        // Stop the loop if we found an incomplete dependence
+        if (incomplete_dependence_found) break;
+      }
+    }
+
+    // NOTE: it's not safe to access aggregate any more if an incomplete
+    // dependence was found, because some other thread could have already popped
+    // it off of another waiting queue
+
+    if (!incomplete_dependence_found) {
+      // all of the predecessors have completed, so we can complete the
+      // aggregate
+      _self().complete(std::move(aggregate), info);
+    }
+    // Note!! the aggregate may have been deleted at this point, so don't add
+    // anything here!
+  }
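+
+  // Context sketch: aggregates are what back when_all-style futures (the
+  // user-facing spelling is roughly auto f = Kokkos::when_all(futures, n)).
+  // The loop above parks the aggregate on the wait queue of the first
+  // still-incomplete predecessor; only when none remain does it complete.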
+
+  // Provide a sensible default that can be overridden
+  template <class TaskQueueTraits>
+  KOKKOS_INLINE_FUNCTION void update_scheduling_info_from_completed_predecessor(
+      RunnableTaskBase<TaskQueueTraits>& ready_task,
+      RunnableTaskBase<TaskQueueTraits> const& predecessor) const {
+    // by default, tell a ready task to use the scheduling info of its most
+    // recent predecessor
+    using task_scheduling_info_type =
+        typename Derived::task_scheduling_info_type;
+    ready_task.template scheduling_info_as<task_scheduling_info_type>() =
+        predecessor.template scheduling_info_as<task_scheduling_info_type>();
+  }
+
+  // Provide a sensible default that can be overridden
+  template <class SchedulingInfo, class TaskQueueTraits>
+  KOKKOS_INLINE_FUNCTION void update_scheduling_info_from_completed_predecessor(
+      AggregateTask<TaskQueueTraits, SchedulingInfo>& aggregate,
+      RunnableTaskBase<TaskQueueTraits> const& predecessor) const {
+    // by default, tell a ready task to use the scheduling info of its most
+    // recent predecessor
+    using task_scheduling_info_type =
+        typename Derived::task_scheduling_info_type;
+    aggregate.scheduling_info() =
+        predecessor.template scheduling_info_as<task_scheduling_info_type>();
+  }
+
+  // Provide a sensible default that can be overridden
+  template <class SchedulingInfo, class TaskQueueTraits>
+  KOKKOS_INLINE_FUNCTION void update_scheduling_info_from_completed_predecessor(
+      AggregateTask<TaskQueueTraits, SchedulingInfo>& aggregate,
+      AggregateTask<TaskQueueTraits, SchedulingInfo> const& predecessor) const {
+    // by default, tell a ready task to use the scheduling info of its most
+    // recent predecessor
+    aggregate.scheduling_info() = predecessor.scheduling_info();
+  }
+
+  // Provide a sensible default that can be overridden
+  template <class SchedulingInfo, class TaskQueueTraits>
+  KOKKOS_INLINE_FUNCTION void update_scheduling_info_from_completed_predecessor(
+      RunnableTaskBase<TaskQueueTraits>& ready_task,
+      AggregateTask<TaskQueueTraits, SchedulingInfo> const& predecessor) const {
+    // by default, tell a ready task to use the scheduling info of its most
+    // recent predecessor
+    using task_scheduling_info_type =
+        typename Derived::task_scheduling_info_type;
+    ready_task.template scheduling_info_as<task_scheduling_info_type>() =
+        predecessor.scheduling_info();
+  }
+
+  template <class TaskQueueTraits>
+  KOKKOS_INLINE_FUNCTION void initialize_scheduling_info_from_predecessor(
+      TaskNode<TaskQueueTraits>& /*task*/,
+      TaskNode<TaskQueueTraits>& /*predecessor*/) const {
+    /* do nothing by default */
+  }
+
+  template <class TeamSchedulerInfo, class TaskQueueTraits>
+  KOKKOS_INLINE_FUNCTION void
+  initialize_scheduling_info_from_team_scheduler_info(
+      TaskNode<TaskQueueTraits>& /*task*/,
+      TeamSchedulerInfo const& /*info*/) const {
+    /* do nothing by default */
+  }
+
+  template <class ExecutionSpace, class MemorySpace, class MemoryPool>
+  static /* constexpr */ size_t task_queue_allocation_size(
+      ExecutionSpace const&, MemorySpace const&, MemoryPool const&)
+  // requires Same<ExecutionSpace, typename Derived::execution_space>
+  //            && Same<MemorySpace, typename Derived::memory_space>
+  //            && Same<MemoryPool, typename Derived::memory_pool>
+  {
+    static_assert(
+        std::is_same<ExecutionSpace,
+                     typename Derived::execution_space>::value &&
+            std::is_same<MemorySpace, typename Derived::memory_space>::value &&
+            std::is_same<MemoryPool, typename Derived::memory_pool>::value,
+        "Type mismatch in task_queue_allocation_size customization point");
+
+    return sizeof(Derived);
+  }
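+
+  // Usage sketch (illustrative; the exact call site lives in the scheduler):
+  //
+  //   size_t sz    = Derived::task_queue_allocation_size(exec, mem, pool);
+  //   void*  buf   = mem.allocate(sz);          // or a shared allocation
+  //   auto*  queue = new (buf) Derived(pool);   // placement-construct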
+
+  // </editor-fold> end Scheduling }}}2
+  //----------------------------------------------------------------------------
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_TASKQUEUECOMMON_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueueMemoryManager.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueueMemoryManager.hpp
new file mode 100644 (file)
index 0000000..c8039fa
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_TASKQUEUEMEMORYMANAGER_HPP
+#define KOKKOS_IMPL_TASKQUEUEMEMORYMANAGER_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_TaskScheduler_fwd.hpp>
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_MemoryPool.hpp>
+
+#include <impl/Kokkos_TaskBase.hpp>
+#include <impl/Kokkos_TaskResult.hpp>
+
+#include <Kokkos_Atomic.hpp>
+#include <impl/Kokkos_OptionalRef.hpp>
+#include <impl/Kokkos_LIFO.hpp>
+
+#include <string>
+#include <typeinfo>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class ExecSpace, class MemorySpace,
+          class MemoryPool =
+              Kokkos::MemoryPool<Kokkos::Device<ExecSpace, MemorySpace>>>
+class TaskQueueMemoryManager : public TaskQueueBase {
+ public:
+  using execution_space      = ExecSpace;
+  using memory_space         = MemorySpace;
+  using device_type          = Kokkos::Device<execution_space, memory_space>;
+  using memory_pool          = MemoryPool;
+  using allocation_size_type = size_t;
+
+ private:
+  memory_pool m_pool;
+  // TODO @tasking @generalization DSH re-enable this with a flag in the type
+  // long m_accum_alloc = 0;
+  int m_count_alloc = 0;
+  int m_max_alloc   = 0;
+
+  struct _allocation_result {
+    bool success;
+    void* pointer;
+  };
+
+  KOKKOS_INLINE_FUNCTION
+  _allocation_result _do_pool_allocate(allocation_size_type requested_size) {
+    // KOKKOS_EXPECTS(requested_size >= 0); generates a warning when
+    // allocation_size_type is unsigned
+    if (requested_size == 0) {
+      return {true, nullptr};
+    } else {
+      void* data = m_pool.allocate(static_cast<size_t>(requested_size));
+
+      Kokkos::Impl::desul_atomic_inc(
+          &m_count_alloc, Kokkos::Impl::MemoryOrderSeqCst(),
+          Kokkos::Impl::MemoryScopeDevice());  // TODO? memory_order_relaxed
+      // TODO @tasking @minor DSH make this thread safe? (otherwise, it's just
+      // an approximation, which is probably fine...)
+      if (m_max_alloc < m_count_alloc) m_max_alloc = m_count_alloc;
+
+      return {data != nullptr, data};
+    }
+  }
+
+  template <class T, class... Args>
+  KOKKOS_INLINE_FUNCTION T* _do_construct(void* allocated,
+                                         allocation_size_type allocated_size,
+                                         Args&&... args) {
+    static_assert(std::is_base_of<PoolAllocatedObjectBase<int32_t>, T>::value,
+                  "TaskQueueMemoryManager can only allocate objects with "
+                  "PoolAllocatedObjectBase base class");
+
+    // TODO @tasking DSH figure out why this isn't working
+    // static_assert(
+    //   std::is_constructible<T, Args..., int32_t>::value,
+    //   "TaskQueueMemoryManager can't construct an object of the requested "
+    //   "type from the allocation size and the given arguments");
+
+    auto rv = new (allocated) T(std::forward<Args>(args)..., allocated_size);
+
+    // It feels like there should be a way to check this at compile-time
+    KOKKOS_ASSERT(
+        (intptr_t)(rv) ==
+            (intptr_t)(static_cast<PoolAllocatedObjectBase<int32_t>*>(rv)) &&
+        "PoolAllocatedObjectBase must be the first base class of the allocated "
+        "type");
+
+    return rv;
+  }
+
+ public:
+  explicit TaskQueueMemoryManager(memory_pool const& pool) : m_pool(pool) {}
+
+  template <class T, class... Args>
+  KOKKOS_FUNCTION T* allocate_and_construct(Args&&... args)
+  // requires
+  //   std::is_base_of_v<PoolAllocatedObjectBase<typename
+  //   memory_pool::size_type>, T>
+  //     && std::is_constructible_v<T, Args&&..., allocation_size_type>
+  {
+    constexpr auto allocation_size = sizeof(T);
+
+    auto result = _do_pool_allocate(allocation_size);
+
+    KOKKOS_ASSERT(result.success && "Memory allocation failure");
+
+    auto rv = _do_construct<T>(result.pointer, allocation_size,
+                              std::forward<Args>(args)...);
+
+    KOKKOS_ENSURES(intptr_t(rv) % alignof(T) == 0 &&
+                   "alignment not preserved!");
+
+    return rv;
+  }
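+
+  // Usage sketch (illustrative; TaskType stands for any type that derives
+  // from PoolAllocatedObjectBase and whose constructor accepts the
+  // allocation size as its trailing argument, per the requires-comment):
+  //
+  //   auto* t = manager.allocate_and_construct<TaskType>(ctor_args...);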
+
+  template <class T, class VLAValueType, class... Args>
+  KOKKOS_INLINE_FUNCTION T* allocate_and_construct_with_vla_emulation(
+      allocation_size_type n_vla_entries, Args&&... args)
+  // requires
+  //   std::is_base_of_v<PoolAllocatedObjectBase<typename
+  //   memory_pool::size_type>, T>
+  //     && std::is_base_of<ObjectWithVLAEmulation<T, VLAValueType>, T>::value
+  //     && std::is_constructible_v<T, allocation_size_type, Args&&...>
+  {
+    static_assert(
+        std::is_base_of<ObjectWithVLAEmulation<T, VLAValueType>, T>::value,
+        "Can't append emulated variable length array of type with greater "
+        "alignment than"
+        "  the type to which the VLA is being appended");
+
+    using vla_emulation_base = ObjectWithVLAEmulation<T, VLAValueType>;
+
+    auto const allocation_size =
+        vla_emulation_base::required_allocation_size(n_vla_entries);
+    auto result = _do_pool_allocate(allocation_size);
+
+    KOKKOS_ASSERT(result.success && "Memory allocation failure");
+
+    auto rv = _do_construct<T>(result.pointer, allocation_size,
+                              std::forward<Args>(args)...);
+
+    KOKKOS_ENSURES(intptr_t(rv) % alignof(T) == 0);
+
+    return rv;
+  }
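+
+  // Usage sketch (illustrative names): an aggregate with n predecessor
+  // slots is allocated as one block, object header plus n VLA entries:
+  //
+  //   auto* agg = manager.allocate_and_construct_with_vla_emulation<
+  //       AggregateType, PredecessorPointer>(n, ctor_args...);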
+
+  template <class CountType>
+  KOKKOS_INLINE_FUNCTION void deallocate(
+      PoolAllocatedObjectBase<CountType>&& obj) {
+    m_pool.deallocate((void*)&obj, 1);
+    Kokkos::Impl::desul_atomic_dec(
+        &m_count_alloc, Kokkos::Impl::MemoryOrderSeqCst(),
+        Kokkos::Impl::MemoryScopeDevice());  // TODO? memory_order_relaxed
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  memory_pool& get_memory_pool() { return m_pool; }
+  KOKKOS_INLINE_FUNCTION
+  memory_pool const& get_memory_pool() const { return m_pool; }
+
+  KOKKOS_INLINE_FUNCTION
+  int allocation_count() const noexcept { return m_count_alloc; }
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+////////////////////////////////////////////////////////////////////////////////
+// END OLD CODE
+////////////////////////////////////////////////////////////////////////////////
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_TASKQUEUEMEMORYMANAGER_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueueMultiple.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueueMultiple.hpp
new file mode 100644 (file)
index 0000000..31c7376
--- /dev/null
@@ -0,0 +1,272 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// Experimental unified task-data parallel manycore LDRD
+
+#ifndef KOKKOS_IMPL_TASKQUEUEMULTIPLE_HPP
+#define KOKKOS_IMPL_TASKQUEUEMULTIPLE_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_TaskScheduler_fwd.hpp>
+#include <Kokkos_Core_fwd.hpp>
+
+#include <Kokkos_MemoryPool.hpp>
+
+#include <impl/Kokkos_TaskBase.hpp>
+#include <impl/Kokkos_TaskResult.hpp>
+#include <impl/Kokkos_TaskQueue.hpp>
+
+#include <Kokkos_Atomic.hpp>
+
+#include <string>
+#include <typeinfo>
+#include <cassert>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename ExecSpace,
+          typename MemorySpace = typename ExecSpace::memory_space>
+class LeagueQueueCollection;
+
+template <class ExecSpace, class MemorySpace>
+class TaskQueueMultiple : public TaskQueue<ExecSpace, MemorySpace> {
+ private:
+  using base_t             = TaskQueue<ExecSpace, MemorySpace>;
+  using queue_collection_t = LeagueQueueCollection<ExecSpace, MemorySpace>;
+
+  int m_league_rank = static_cast<int>(KOKKOS_INVALID_INDEX);
+
+  // This pointer is owning only if m_league_rank == 0
+  queue_collection_t* m_other_queues = nullptr;
+
+ public:
+  struct Destroy {
+    TaskQueueMultiple* m_queue;
+    void destroy_shared_allocation();
+  };
+
+  using team_queue_type = TaskQueueMultiple;
+
+  TaskQueueMultiple(int arg_league_rank, queue_collection_t* arg_other_queues,
+                    typename base_t::memory_pool const& arg_memory_pool)
+      : base_t(arg_memory_pool),
+        m_league_rank(arg_league_rank),
+        m_other_queues(arg_other_queues) {}
+
+  explicit TaskQueueMultiple(
+      typename base_t::memory_pool const& arg_memory_pool)
+      : base_t(arg_memory_pool), m_league_rank(0) {
+    void* other_queues_buffer =
+        typename base_t::memory_space{}.allocate(sizeof(queue_collection_t));
+    m_other_queues = new (other_queues_buffer) queue_collection_t(this);
+  }
+
+  ~TaskQueueMultiple() {
+    if (m_league_rank == 0 && m_other_queues != nullptr) {
+      m_other_queues->~queue_collection_t();
+      typename base_t::memory_space{}.deallocate(m_other_queues,
+                                                 sizeof(queue_collection_t));
+    }
+    // rest of destruction is handled in the base class
+  }
+
+  //----------------------------------------
+
+  void initialize_team_queues(int arg_league_size) const noexcept {
+    m_other_queues->initialize_team_queues(arg_league_size, this->m_memory);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  team_queue_type& get_team_queue(int arg_league_rank) noexcept {
+    if (arg_league_rank == m_league_rank)
+      return *this;
+    else
+      return m_other_queues->get_team_queue(arg_league_rank);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  typename base_t::task_root_type* attempt_to_steal_task() noexcept {
+    TaskBase* rv        = nullptr;
+    auto* const end_tag = reinterpret_cast<TaskBase*>(TaskBase::EndTag);
+
+    if (m_other_queues == nullptr) {
+      Kokkos::abort("attempted to steal task before queues were initialized!");
+    }
+
+    // Loop by priority and then type, and then team
+    for (int i = 0; i < base_t::NumQueue; ++i) {
+      for (int j = 0; j < 2; ++j) {
+        // for now, always start by trying to steal from team zero
+        for (int iteam = 0; iteam < m_other_queues->size(); ++iteam) {
+          if (iteam == m_league_rank) continue;
+          auto& steal_from = get_team_queue(iteam);
+          if (*((volatile int*)&steal_from.m_ready_count) > 0) {
+            // we've found at least one queue that's not done, so even if we
+            // can't pop something off of it we shouldn't return a nullptr
+            // indicating completion.  rv will be end_tag when the pop fails
+            rv = base_t::pop_ready_task(&steal_from.m_ready[i][j]);
+            if (rv != end_tag) {
+              // task stolen.
+              // first increment our ready count, then decrement the ready count
+              // on the other queue:
+              Kokkos::Impl::desul_atomic_inc(
+                  &this->m_ready_count, Kokkos::Impl::MemoryOrderSeqCst(),
+                  Kokkos::Impl::MemoryScopeDevice());  // TODO?
+                                                       // memory_order_relaxed
+              Kokkos::Impl::desul_atomic_dec(
+                  &steal_from.m_ready_count, Kokkos::Impl::MemoryOrderSeqCst(),
+                  Kokkos::Impl::MemoryScopeDevice());  // TODO?
+                                                       // memory_order_relaxed
+              return rv;
+            }
+          }
+        }
+      }
+    }
+
+    // at this point, rv will only be nullptr if *all* of the queues had an
+    // m_ready_count of 0.  This indicates quiescence.  If at least some of them
+    // had non-zero, there would have been at least one pop_ready_task that
+    // was called and returned end_tag if it couldn't pop a task
+    return rv;
+  }
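+
+  // Search-order sketch for the loop above (illustrative; league of 3,
+  // caller is rank 1): for each priority i and task type j it visits teams
+  // 0, 2, 0, 2, ... until a pop succeeds or every queue's m_ready_count is
+  // zero, which signals quiescence (rv stays nullptr).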
+};
+
+template <typename ExecSpace, typename MemorySpace>
+class LeagueQueueCollection {
+ private:
+  using execution_space     = ExecSpace;
+  using memory_space        = MemorySpace;
+  using device_type         = Kokkos::Device<execution_space, memory_space>;
+  using memory_pool         = Kokkos::MemoryPool<device_type>;
+  using team_queue_type     = TaskQueueMultiple<execution_space, memory_space>;
+  using team_scheduler_type = BasicTaskScheduler<ExecSpace, team_queue_type>;
+  using specialization      = TaskQueueSpecialization<team_scheduler_type>;
+
+  enum : long { max_num_queues = 6 };  // i.e., specialization::max_league_size
+
+  // this is a non-owning pointer
+  team_queue_type* m_rank_zero_queue = nullptr;
+  // This really needs to be an optional<TaskQueue<ExecSpace>>
+  union optional_queue {
+    KOKKOS_INLINE_FUNCTION
+    optional_queue() : uninitialized(0) {}
+    KOKKOS_INLINE_FUNCTION
+    ~optional_queue() { uninitialized = 0; }
+    char uninitialized;
+    team_queue_type initialized;
+  } m_queues[max_num_queues];
+  int m_size = static_cast<int>(KOKKOS_INVALID_INDEX);
+
+ public:
+  LeagueQueueCollection()                             = delete;
+  LeagueQueueCollection(LeagueQueueCollection const&) = delete;
+  LeagueQueueCollection(LeagueQueueCollection&&)      = delete;
+  LeagueQueueCollection& operator=(LeagueQueueCollection const&) = delete;
+  LeagueQueueCollection& operator=(LeagueQueueCollection&&) = delete;
+
+  ~LeagueQueueCollection() {
+    // destroy only the initialized queues that we own
+    for (int iteam = 0; iteam < m_size - 1; ++iteam) {
+      m_queues[iteam].initialized.~team_queue_type();
+      m_queues[iteam].uninitialized = 0;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  explicit LeagueQueueCollection(team_queue_type* arg_rank_zero_queue)
+      : m_rank_zero_queue(arg_rank_zero_queue), m_size(1) {}
+
+  void initialize_team_queues(int arg_count,
+                              memory_pool const& arg_memory_pool) noexcept {
+    arg_count = std::min((int)max_num_queues, arg_count);
+    // assert(arg_count <= max_num_queues);
+    if (arg_count > m_size) {
+      for (int i = m_size; i < arg_count; ++i) {
+        new (&m_queues[i - 1].initialized)
+            team_queue_type(i, this, arg_memory_pool);
+      }
+      m_size = arg_count;
+    }
+  }
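+
+  // Note on the union above: queues for ranks >= 1 are placement-new'ed
+  // lazily into m_queues[i - 1].initialized, so rank r maps to slot r - 1,
+  // while rank 0 lives in *m_rank_zero_queue (see get_team_queue() below).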
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr int size() const noexcept { return m_size; }
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr bool initialized() const noexcept {
+    return m_size != int(KOKKOS_INVALID_INDEX);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  team_queue_type& get_team_queue(int iteam) {
+    iteam %= max_num_queues;
+#if !defined(__HIP_DEVICE_COMPILE__) && !defined(__CUDA_ARCH__)
+    assert(initialized());
+    assert(iteam < m_size);
+    assert(iteam >= 0);
+#endif
+    if (iteam == 0)
+      return *m_rank_zero_queue;
+    else
+      return m_queues[iteam - 1].initialized;
+  }
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#include <impl/Kokkos_TaskQueueMultiple_impl.hpp>
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_TASKQUEUEMULTIPLE_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueueMultiple_impl.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueueMultiple_impl.hpp
new file mode 100644 (file)
index 0000000..1af9791
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_TASKQUEUEMULTIPLE_IMPL_HPP
+#define KOKKOS_IMPL_TASKQUEUEMULTIPLE_IMPL_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <impl/Kokkos_TaskQueueMultiple.hpp>
+
+#define KOKKOS_IMPL_DEBUG_TASKDAG_SCHEDULING_MULTIPLE 0
+
+namespace Kokkos {
+namespace Impl {
+
+template <class ExecSpace, class MemorySpace>
+void TaskQueueMultiple<ExecSpace,
+                       MemorySpace>::Destroy::destroy_shared_allocation() {
+// KOKKOS WORKAROUND for CUDA 10.1 with GCC 7.3.0
+#if (KOKKOS_COMPILER_CUDA_VERSION == 101) && defined(KOKKOS_COMPILER_NVCC) && \
+    (KOKKOS_COMPILER_GNU >= 730)
+  (*m_queue).get_team_queue(0).~TaskQueueMultiple();
+#else
+  m_queue->get_team_queue(0).~TaskQueueMultiple();
+#endif
+}
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_TASKQUEUEMULTIPLE_IMPL_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueue_impl.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskQueue_impl.hpp
new file mode 100644 (file)
index 0000000..324227c
--- /dev/null
@@ -0,0 +1,675 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_TASKQUEUE_IMPL_HPP
+#define KOKKOS_IMPL_TASKQUEUE_IMPL_HPP
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#define KOKKOS_IMPL_DEBUG_TASKDAG_SCHEDULING 0
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+
+template <typename ExecSpace, typename MemorySpace>
+void TaskQueue<ExecSpace, MemorySpace>::Destroy::destroy_shared_allocation() {
+  m_queue->~TaskQueue();
+}
+
+//----------------------------------------------------------------------------
+
+template <typename ExecSpace, typename MemorySpace>
+TaskQueue<ExecSpace, MemorySpace>::TaskQueue(
+    typename TaskQueue<ExecSpace, MemorySpace>::memory_pool const
+        &arg_memory_pool)
+    : m_memory(arg_memory_pool),
+      m_ready()
+      //, m_accum_alloc(0)
+      //, m_count_alloc(0)
+      //, m_max_alloc(0)
+      ,
+      m_ready_count(0) {
+  for (int i = 0; i < NumQueue; ++i) {
+    m_ready[i][0] = (task_root_type *)task_root_type::EndTag;
+    m_ready[i][1] = (task_root_type *)task_root_type::EndTag;
+  }
+}
+
+//----------------------------------------------------------------------------
+
+template <typename ExecSpace, typename MemorySpace>
+TaskQueue<ExecSpace, MemorySpace>::~TaskQueue() {
+  // Verify that queues are empty and ready count is zero
+
+  for (int i = 0; i < NumQueue; ++i) {
+    for (int j = 0; j < 2; ++j) {
+      if (m_ready[i][j] != (task_root_type *)task_root_type::EndTag) {
+        Kokkos::abort("TaskQueue::~TaskQueue ERROR: has ready tasks");
+      }
+    }
+  }
+
+  if (0 != m_ready_count) {
+    Kokkos::abort("TaskQueue::~TaskQueue ERROR: has ready or executing tasks");
+  }
+}
+
+//----------------------------------------------------------------------------
+
+template <typename ExecSpace, typename MemorySpace>
+KOKKOS_FUNCTION void TaskQueue<ExecSpace, MemorySpace>::decrement(
+    TaskQueue<ExecSpace, MemorySpace>::task_root_type *task) {
+  task_root_type volatile &t = *task;
+
+  const int count = Kokkos::atomic_fetch_add(&(t.m_ref_count), -1);
+  Kokkos::memory_fence();
+
+#if KOKKOS_IMPL_DEBUG_TASKDAG_SCHEDULING
+  if (1 == count) {
+    printf("decrement-destroy( 0x%lx { 0x%lx %d %d } )\n", uintptr_t(task),
+           uintptr_t(task->m_next), int(task->m_task_type),
+           int(task->m_ref_count));
+  }
+#endif
+
+  if ((1 == count) && (t.m_next == (task_root_type *)task_root_type::LockTag)) {
+    // Reference count is zero and task is complete, deallocate.
+
+    // TaskQueue< ExecSpace, MemorySpace> * const queue =
+    //  static_cast<scheduler_type const *>( t.m_scheduler )->m_queue;
+    auto *const volatile queue = static_cast<TaskQueue *>(t.m_queue);
+
+    // TODO @tasking @minor DSH this should call the destructor for a
+    // non-trivially destructible type (possibly just ignore this in the old
+    // version, though?).  Can't just do
+    //   if (task->m_destroy) task->m_destroy(task);
+    // here: this is device code, so the destructor call needs to be queued.
+
+    queue->deallocate(task, t.m_alloc_size);
+  } else if (count <= 1) {
+    Kokkos::abort(
+        "TaskScheduler task has negative reference count or is incomplete");
+  }
+}
+
+//----------------------------------------------------------------------------
+
+template <typename ExecSpace, typename MemorySpace>
+KOKKOS_FUNCTION size_t
+TaskQueue<ExecSpace, MemorySpace>::allocate_block_size(size_t n) {
+  return m_memory.allocate_block_size(n);
+}
+
+template <typename ExecSpace, typename MemorySpace>
+KOKKOS_FUNCTION void *TaskQueue<ExecSpace, MemorySpace>::allocate(size_t n) {
+  void *const p = m_memory.allocate(n);
+
+  if (p) {
+    Kokkos::Impl::desul_atomic_inc(
+        &m_count_alloc, Kokkos::Impl::MemoryOrderSeqCst(),
+        Kokkos::Impl::MemoryScopeDevice());  // TODO? memory_order_relaxed
+
+    // if ( m_max_alloc < m_count_alloc ) m_max_alloc = m_count_alloc ;
+  }
+
+  return p;
+}
+
+template <typename ExecSpace, typename MemorySpace>
+KOKKOS_FUNCTION void TaskQueue<ExecSpace, MemorySpace>::deallocate(void *p,
+                                                                   size_t n) {
+  m_memory.deallocate(p, n);
+  Kokkos::Impl::desul_atomic_dec(
+      &m_count_alloc, Kokkos::Impl::MemoryOrderSeqCst(),
+      Kokkos::Impl::MemoryScopeDevice());  // TODO? memory_order_relaxed
+}
+
+//----------------------------------------------------------------------------
+
+template <typename ExecSpace, typename MemorySpace>
+KOKKOS_FUNCTION bool TaskQueue<ExecSpace, MemorySpace>::push_task(
+    TaskQueue<ExecSpace, MemorySpace>::task_root_type *volatile *const queue,
+    TaskQueue<ExecSpace, MemorySpace>::task_root_type *const task) {
+  // Push task into a concurrently pushed and popped queue.
+  // The queue can be either a ready task queue or a waiting task queue.
+  // The queue is a linked list where 'task->m_next' form the links.
+  // Fail the push attempt if the queue is locked;
+  // otherwise retry until the push succeeds.
+
+#if KOKKOS_IMPL_DEBUG_TASKDAG_SCHEDULING
+  printf("push_task( 0x%lx { 0x%lx } 0x%lx { 0x%lx 0x%lx %d %d %d } )\n",
+         uintptr_t(queue), uintptr_t(*queue), uintptr_t(task),
+         uintptr_t(task->m_wait), uintptr_t(task->m_next), task->m_task_type,
+         task->m_priority, task->m_ref_count);
+#endif
+
+  task_root_type *const zero = nullptr;
+  task_root_type *const lock = (task_root_type *)task_root_type::LockTag;
+
+  task_root_type *volatile &next = task->m_next;
+
+  if (zero != next) {
+    Kokkos::abort(
+        "TaskQueue::push_task ERROR: already a member of another queue");
+  }
+
+  // store the head of the queue
+  task_root_type *old_head = *queue;
+
+  while (old_head != lock) {
+    // set task->next to the head of the queue
+    next = old_head;
+
+    // Do not proceed until 'next' has been stored.
+    Kokkos::memory_fence();
+
+    // store the old head
+    task_root_type *const old_head_tmp = old_head;
+
+    // attempt to swap task with the old head of the queue
+    // as if this were done atomically:
+    //   if(*queue == old_head) {
+    //     *queue = task;
+    //   }
+    //   old_head = *queue;
+    old_head = Kokkos::Impl::desul_atomic_compare_exchange(
+        const_cast<task_root_type **>(queue), old_head, task,
+        Kokkos::Impl::MemoryOrderSeqCst(), Kokkos::Impl::MemoryScopeDevice());
+
+    if (old_head_tmp == old_head) return true;
+  }
+
+  // Failed, replace 'task->m_next' value since 'task' remains
+  // not a member of a queue.
+
+  next = zero;
+
+  // Do not proceed until 'next' has been stored.
+  Kokkos::memory_fence();
+
+  return false;
+}
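+
+// Usage sketch, as it appears in the schedulers below: a ready-queue push is
+// retried until it succeeds, because a ready queue is only transiently
+// locked, while a wait-queue push is attempted once, since failure there
+// means the owning task already completed:
+//
+//   while (!push_task(ready_queue, task))
+//     ;                                           // ready queue: retry
+//   bool parked = push_task(&dep->m_wait, task);  // wait queue: one attempt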
+
+//----------------------------------------------------------------------------
+
+template <typename ExecSpace, typename MemorySpace>
+KOKKOS_FUNCTION typename TaskQueue<ExecSpace, MemorySpace>::task_root_type *
+TaskQueue<ExecSpace, MemorySpace>::pop_ready_task(
+    TaskQueue<ExecSpace, MemorySpace>::task_root_type *volatile *const queue) {
+  // Pop task from a concurrently pushed and popped ready task queue.
+  // The queue is a linked list where 'task->m_next' form the links.
+
+  task_root_type *const lock = (task_root_type *)task_root_type::LockTag;
+  task_root_type *const end  = (task_root_type *)task_root_type::EndTag;
+
+  // *queue is
+  //   end   => an empty queue
+  //   lock  => a locked queue
+  //   valid
+
+  // Retry until the lock is acquired or the queue is empty.
+
+  task_root_type *task = *queue;
+
+  while (end != task) {
+    // The only possible values for the queue are
+    // (1) lock, (2) end, or (3) a valid task.
+    // Thus zero will never appear in the queue.
+    //
+    // If the queue is locked then just re-read: substituting nullptr for
+    // 'task' guarantees the CAS below will fail.
+
+    if (lock == task) task = nullptr;
+
+    task_root_type *const x = task;
+
+    //    task = Kokkos::atomic_compare_exchange(queue, x, lock);
+    task = Kokkos::Impl::desul_atomic_compare_exchange(
+        const_cast<task_root_type **>(queue), x, lock,
+        Kokkos::Impl::MemoryOrderSeqCst(), Kokkos::Impl::MemoryScopeDevice());
+
+    if (x == task) {
+      // CAS succeeded and queue is locked
+      //
+      // This thread has locked the queue and removed 'task' from the queue.
+      // Extract the next entry of the queue from 'task->m_next'
+      // and mark 'task' as popped from a queue by setting
+      // 'task->m_next = lock'.
+      //
+      // Place the next entry in the head of the queue,
+      // which also unlocks the queue.
+      //
+      // This thread has exclusive access to
+      // the queue and the popped task's m_next.
+
+      Kokkos::memory_fence();
+
+      task_root_type *volatile &next = task->m_next;
+
+      // This algorithm is not lock-free: an adversarial scheduler could
+      // context-switch this thread at this point, and the rest of the threads
+      // calling this method would never make forward progress.
+
+      *queue = next;
+      next   = lock;
+
+      Kokkos::memory_fence();
+
+#if KOKKOS_IMPL_DEBUG_TASKDAG_SCHEDULING
+      printf("pop_ready_task( 0x%lx 0x%lx { 0x%lx 0x%lx %d %d %d } )\n",
+             uintptr_t(queue), uintptr_t(task), uintptr_t(task->m_wait),
+             uintptr_t(task->m_next), int(task->m_task_type),
+             int(task->m_priority), int(task->m_ref_count));
+#endif
+
+      return task;
+    }
+  }
+
+  return end;
+}
+
+//----------------------------------------------------------------------------
+
+template <typename ExecSpace, typename MemorySpace>
+KOKKOS_FUNCTION void TaskQueue<ExecSpace, MemorySpace>::schedule_runnable(
+    TaskQueue<ExecSpace, MemorySpace>::task_root_type *const task) {
+  // Schedule a runnable task upon construction / spawn
+  // and upon completion of other tasks that 'task' is waiting on.
+  //
+  // Precondition:
+  // - called by a single thread for the input task
+  // - calling thread has exclusive access to the task
+  // - task is not a member of a queue
+  // - if runnable then task is either constructing or respawning
+  //
+  //   Constructing state:
+  //     task->m_wait == 0
+  //     task->m_next == dependence or 0
+  //   Respawn state:
+  //     task->m_wait == head of linked list: 'end' or valid task
+  //     task->m_next == dependence or 0
+  //
+  //  Task state transition:
+  //     Constructing ->  Waiting
+  //     Respawn      ->  Waiting
+  //
+  //  Postcondition on task state:
+  //     task->m_wait == head of linked list (queue)
+  //     task->m_next == member of linked list (queue)
+
+#if KOKKOS_IMPL_DEBUG_TASKDAG_SCHEDULING
+  printf("schedule_runnable( 0x%lx { 0x%lx 0x%lx %d %d %d }\n", uintptr_t(task),
+         uintptr_t(task->m_wait), uintptr_t(task->m_next), task->m_task_type,
+         task->m_priority, task->m_ref_count);
+#endif
+
+  task_root_type *const zero = nullptr;
+  task_root_type *const lock = (task_root_type *)task_root_type::LockTag;
+  task_root_type *const end  = (task_root_type *)task_root_type::EndTag;
+
+  task_root_type volatile &t = *task;
+
+  bool respawn = false;
+
+  //----------------------------------------
+
+  if (zero == t.m_wait) {
+    // Task in Constructing state
+    // - Transition to Waiting state
+    // Preconditions:
+    // - call occurs exclusively within a single thread
+
+    t.m_wait = end;
+    // Task in Waiting state
+  } else if (lock != t.m_wait) {
+    // Task in Executing state with Respawn request
+    // - Update dependence
+    // - Transition to Waiting state
+    respawn = true;
+  } else {
+    // Task in Complete state
+    Kokkos::abort("TaskQueue::schedule_runnable ERROR: task is complete");
+  }
+
+  //----------------------------------------
+  // Scheduling a runnable task which may have a dependency 'dep'.
+  // Extract dependence, if any, from task->m_next.
+  // If 'dep' is not null then attempt to push 'task'
+  // into the wait queue of 'dep'.
+  // If the push succeeds then 'task' may be
+  // processed or executed by another thread at any time.
+  // If the push fails then 'dep' is complete and 'task'
+  // is ready to execute.
+
+  // Exclusive access so don't need an atomic exchange
+  // task_root_type * dep = Kokkos::atomic_exchange( & task->m_next , zero );
+  task_root_type *dep = t.m_next;
+  t.m_next            = zero;
+
+  Kokkos::memory_fence();
+
+  // 'task' is ready if we don't have a dependency, or if pushing onto the
+  // wait queue of that dependency failed (the only time that queue should be
+  // locked is when the dependency is transitioning to complete).
+  const bool is_ready = (nullptr == dep) || (!push_task(&dep->m_wait, task));
+
+  if ((nullptr != dep) && respawn) {
+    // The reference count for dep was incremented when
+    // respawn assigned the dependency to task->m_next,
+    // so that if dep completed prior to the
+    // above push_task, dep would not be destroyed.
+    // The reference count of dep can now be decremented,
+    // which may deallocate dep.
+    TaskQueue::assign(&dep, nullptr);
+  }
+
+  if (is_ready) {
+    // No dependence or 'dep' is complete so push task into ready queue.
+    // Increment the ready count before pushing into ready queue
+    // to track number of ready + executing tasks.
+    // The ready count will be decremented when the task is complete.
+
+    Kokkos::Impl::desul_atomic_inc(
+        &m_ready_count, Kokkos::Impl::MemoryOrderSeqCst(),
+        Kokkos::Impl::MemoryScopeDevice());  // TODO? memory_order_relaxed
+
+    task_root_type *volatile *const ready_queue =
+        &m_ready[t.m_priority][t.m_task_type];
+
+    // A push_task fails if the ready queue is locked.
+    // A ready queue is only locked during a push or pop;
+    // i.e., it is never permanently locked.
+    // Retry push to ready queue until it succeeds.
+    // When the push succeeds then 'task' may be
+    // processed or executed by another thread at any time.
+
+    while (!push_task(ready_queue, task))
+      ;
+  }
+
+  //----------------------------------------
+  // Postcondition:
+  // - A runnable 'task' was pushed into a wait or ready queue.
+  // - Concurrent execution may have already popped 'task'
+  //   from a queue and processed it as appropriate.
+}
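+
+// A sketch (illustration only, not Kokkos API) of how the sentinel encoding
+// described in the preconditions above can be read back. Waiting and
+// Executing-Respawn are not distinguishable from the two pointers alone,
+// so they share a state here.
+#if 0
+enum class SketchState { Constructing, Complete, Executing, WaitingOrRespawn };
+
+template <class Task, class TaskPtr>
+SketchState sketch_decode(const Task& t, TaskPtr lock) {
+  if (t.m_wait == nullptr) return SketchState::Constructing;  // never scheduled
+  if (t.m_wait == lock) return SketchState::Complete;   // wait queue locked
+  if (t.m_next == lock) return SketchState::Executing;  // running, no respawn
+  return SketchState::WaitingOrRespawn;  // queued, or respawn was requested
+}
+#endif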
+
+template <typename ExecSpace, typename MemorySpace>
+KOKKOS_FUNCTION void TaskQueue<ExecSpace, MemorySpace>::schedule_aggregate(
+    TaskQueue<ExecSpace, MemorySpace>::task_root_type *const task) {
+  // Schedule an aggregate task upon construction
+  // and upon completion of other tasks that 'task' is waiting on.
+  //
+  // Precondition:
+  // - called by a single thread for the input task
+  // - calling thread has exclusive access to the task
+  // - task is not a member of a queue
+  //
+  //   Constructing state:
+  //     task->m_wait == 0
+  //     task->m_next == dependence or 0
+  //
+  //  Task state transition:
+  //     Constructing ->  Waiting
+  //
+  //  Postcondition on task state:
+  //     task->m_wait == head of linked list (queue)
+  //     task->m_next == member of linked list (queue)
+
+#if KOKKOS_IMPL_DEBUG_TASKDAG_SCHEDULING
+  printf("schedule_aggregate( 0x%lx { 0x%lx 0x%lx %d %d %d %d }\n",
+         uintptr_t(task), uintptr_t(task->m_wait), uintptr_t(task->m_next),
+         task->m_dep_count, task->m_task_type, task->m_priority,
+         task->m_ref_count);
+#endif
+
+  task_root_type *const zero = nullptr;
+  task_root_type *const lock = (task_root_type *)task_root_type::LockTag;
+  task_root_type *const end  = (task_root_type *)task_root_type::EndTag;
+
+  task_root_type volatile &t = *task;
+
+  //----------------------------------------
+
+  if (zero == t.m_wait) {
+    // Task in Constructing state
+    // - Transition to Waiting state
+    // Preconditions:
+    // - call occurs exclusively within a single thread
+
+    t.m_wait = end;
+    // Task in Waiting state
+  } else if (lock == t.m_wait) {
+    // Task in Complete state
+    Kokkos::abort("TaskQueue::schedule_aggregate ERROR: task is complete");
+  }
+
+  //----------------------------------------
+  // Scheduling a 'when_all' task with multiple dependences.
+  // This scheduling may be called when the 'when_all' is
+  // (1) created or
+  // (2) being removed from a completed task's wait list.
+
+  task_root_type *volatile *const aggr = t.aggregate_dependences();
+
+  // Assume the 'when_all' is complete until a dependence is
+  // found that is not complete.
+
+  bool is_complete = true;
+
+  for (int i = t.m_dep_count; 0 < i && is_complete;) {
+    --i;
+
+    // Loop dependences looking for an incomplete task.
+    // Add this task to the incomplete task's wait queue.
+
+    // Remove a task 'x' from the dependence list.
+    // The reference count of 'x' was incremented when
+    // it was assigned into the dependence list.
+
+    // Exclusive access so don't need an atomic exchange
+    // task_root_type * x = Kokkos::atomic_exchange( aggr + i , zero );
+    task_root_type *x = aggr[i];
+    aggr[i]           = zero;
+
+    if (x) {
+      // If x->m_wait is not locked then the push succeeds
+      // and the aggregate is not complete.
+      // If the push succeeds then this when_all 'task' may be
+      // processed by another thread at any time.
+      // For example, 'x' may be completed by another thread,
+      // which would then re-schedule this when_all 'task'.
+
+      is_complete = !push_task(&x->m_wait, task);
+
+      // Decrement reference count which had been incremented
+      // when 'x' was added to the dependence list.
+
+      TaskQueue::assign(&x, zero);
+    }
+  }
+
+  if (is_complete) {
+    // The when_all 'task' was not added to a wait queue because
+    // all dependences were complete so this aggregate is complete.
+    // Complete the when_all 'task' to schedule other tasks
+    // that are waiting for the when_all 'task' to complete.
+
+    t.m_next = lock;
+
+    complete(task);
+
+    // '*task' may have been deleted upon completion
+  }
+
+  //----------------------------------------
+  // Postcondition:
+  // - An aggregate 'task' was either pushed to a wait queue or completed.
+  // - Concurrent execution may have already popped 'task'
+  //   from a queue and processed it as appropriate.
+}
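+
+// A minimal sketch (hypothetical names, not Kokkos API) of the aggregate
+// scheduling loop above: walk the dependence list, try to attach the when_all
+// node to each dependence whose wait queue is still open, and complete the
+// aggregate only if every dependence has already completed. try_attach(),
+// release_reference() and complete_node() stand in for push_task(),
+// TaskQueue::assign() and complete().
+#if 0
+template <class Node>
+void sketch_schedule_when_all(Node* when_all, Node** deps, int dep_count) {
+  bool is_complete = true;
+  for (int i = dep_count; 0 < i && is_complete;) {
+    --i;
+    Node* x = deps[i];
+    deps[i] = nullptr;
+    if (x) {
+      // The push fails only if x's wait queue is locked, i.e. x is complete.
+      is_complete = !try_attach(&x->wait, when_all);
+      release_reference(x);  // drop the count taken when x was stored
+    }
+  }
+  if (is_complete) complete_node(when_all);  // no incomplete dependence found
+}
+#endif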
+
+//----------------------------------------------------------------------------
+
+template <typename ExecSpace, typename MemorySpace>
+KOKKOS_FUNCTION void TaskQueue<ExecSpace, MemorySpace>::reschedule(
+    task_root_type *task) {
+  // Precondition:
+  //   task is in Executing state
+  //   task->m_next == LockTag
+  //
+  // Postcondition:
+  //   task is in Executing-Respawn state
+  //   task->m_next == 0 (no dependence)
+
+  task_root_type *const zero = nullptr;
+  task_root_type *const lock = (task_root_type *)task_root_type::LockTag;
+  if (lock != Kokkos::Impl::desul_atomic_exchange(
+                  &task->m_next, zero, Kokkos::Impl::MemoryOrderSeqCst(),
+                  Kokkos::Impl::MemoryScopeDevice())) {
+    Kokkos::abort("TaskScheduler::respawn ERROR: already respawned");
+  }
+}
+
+//----------------------------------------------------------------------------
+
+template <typename ExecSpace, typename MemorySpace>
+KOKKOS_FUNCTION void TaskQueue<ExecSpace, MemorySpace>::complete(
+    TaskQueue<ExecSpace, MemorySpace>::task_root_type *task) {
+  // Complete a runnable task that has finished executing,
+  // or a when_all task when all of its dependences are complete.
+
+  task_root_type *const zero = nullptr;
+  task_root_type *const lock = (task_root_type *)task_root_type::LockTag;
+  task_root_type *const end  = (task_root_type *)task_root_type::EndTag;
+
+#if KOKKOS_IMPL_DEBUG_TASKDAG_SCHEDULING
+  printf("complete( 0x%lx { 0x%lx 0x%lx %d %d %d }\n", uintptr_t(task),
+         uintptr_t(task->m_wait), uintptr_t(task->m_next), task->m_task_type,
+         task->m_priority, task->m_ref_count);
+#endif
+
+  task_root_type volatile &t = *task;
+
+  const bool runnable = task_root_type::Aggregate != t.m_task_type;
+
+  //----------------------------------------
+
+  if (runnable && lock != t.m_next) {
+    // A runnable task has finished executing and requested respawn.
+    // Schedule the task for subsequent execution.
+
+    schedule_runnable(task);
+  }
+  //----------------------------------------
+  else {
+    // This is either an aggregate or a runnable task that executed
+    // and did not respawn.  Transition this task to complete.
+
+    // If 'task' is an aggregate then any of the runnable tasks that
+    // it depends upon may be attempting to complete this 'task'.
+    // Must only transition a task once to complete status.
+    // This is controlled by atomically locking the wait queue.
+
+    // Stop other tasks from adding themselves to this task's wait queue
+    // by locking the head of this task's wait queue.
+    task_root_type *x = Kokkos::Impl::desul_atomic_exchange(
+        const_cast<task_root_type **>(&t.m_wait), lock,
+        Kokkos::Impl::MemoryOrderSeqCst(), Kokkos::Impl::MemoryScopeDevice());
+
+    if (x != (task_root_type *)lock) {
+      // This thread has transitioned this 'task' to complete.
+      // 'task' is no longer in a queue and is not executing
+      // so decrement the reference count from 'task's creation.
+      // If no other references to this 'task' then it will be deleted.
+
+      TaskQueue::assign(&task, zero);
+
+      // This thread has exclusive access to the wait list so
+      // the concurrency-safe pop_ready_task function is not needed.
+      // Schedule the tasks that have been waiting on the input 'task',
+      // which may have been deleted.
+
+      while (x != end) {
+        // Have exclusive access to 'x' until it is scheduled
+        // Set x->m_next = zero  <=  no dependence, not a respawn
+
+        task_root_type volatile &vx = *x;
+
+        task_root_type *const next = vx.m_next;
+        vx.m_next                  = nullptr;
+
+        Kokkos::memory_fence();
+
+        if (task_root_type::Aggregate != vx.m_task_type) {
+          schedule_runnable(x);
+        } else {
+#if !defined(__HIP_DEVICE_COMPILE__)
+          schedule_aggregate(x);
+#endif
+        }
+
+        x = next;
+      }
+    }
+  }
+
+  if (runnable) {
+    // A runnable task was popped from a ready queue and executed.
+    // If it respawned then schedule_runnable incremented the ready count
+    // again, so decrement here whether the task respawned or not.
+    Kokkos::Impl::desul_atomic_dec(
+        &m_ready_count, Kokkos::Impl::MemoryOrderSeqCst(),
+        Kokkos::Impl::MemoryScopeDevice());  // TODO? memory_order_relaxed
+  }
+}
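+
+// A minimal sketch (hypothetical names, not Kokkos API; needs <atomic>) of
+// the completion handshake above: the wait-queue head is atomically exchanged
+// with the lock sentinel, so exactly one thread observes the old unlocked
+// head and owns the job of re-scheduling the waiting list.
+#if 0
+template <class Node>
+void sketch_complete(std::atomic<Node*>& wait, Node* lock, Node* end) {
+  // After this exchange no task can append itself to the wait queue.
+  Node* x = wait.exchange(lock, std::memory_order_acq_rel);
+  if (x == lock) return;  // another thread already completed this task
+  while (x != end) {      // exclusive access: a plain traversal is safe
+    Node* next = x->next;
+    x->next    = nullptr;  // no dependence, not a respawn
+    schedule(x);           // re-schedule each waiter
+    x = next;
+  }
+}
+#endif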
+
+//----------------------------------------------------------------------------
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_TASKQUEUE_IMPL_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskResult.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskResult.hpp
new file mode 100644 (file)
index 0000000..7c89354
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+// Experimental unified task-data parallel manycore LDRD
+
+#ifndef KOKKOS_IMPL_TASKRESULT_HPP
+#define KOKKOS_IMPL_TASKRESULT_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_TaskScheduler_fwd.hpp>
+#include <Kokkos_Core_fwd.hpp>
+
+#include <impl/Kokkos_TaskBase.hpp>
+#include <impl/Kokkos_TaskNode.hpp>
+
+#include <string>
+#include <typeinfo>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename ResultType>
+struct TaskResult {
+  enum : int32_t { size = sizeof(ResultType) };
+
+  using reference_type = ResultType&;
+
+  template <class CountType>
+  KOKKOS_INLINE_FUNCTION static ResultType* ptr(
+      PoolAllocatedObjectBase<CountType>* task) {
+    return reinterpret_cast<ResultType*>(reinterpret_cast<char*>(task) +
+                                         task->get_allocation_size() -
+                                         sizeof(ResultType));
+  }
+
+  KOKKOS_INLINE_FUNCTION static ResultType* ptr(TaskBase* task) {
+    return reinterpret_cast<ResultType*>(reinterpret_cast<char*>(task) +
+                                         task->m_alloc_size -
+                                         sizeof(ResultType));
+  }
+
+  KOKKOS_INLINE_FUNCTION static reference_type get(TaskBase* task) {
+    return *ptr(task);
+  }
+
+  template <class TaskQueueTraits>
+  KOKKOS_INLINE_FUNCTION static reference_type get(
+      TaskNode<TaskQueueTraits>* task) {
+    return *ptr(task);
+  }
+
+  KOKKOS_INLINE_FUNCTION static void destroy(TaskBase* task) {
+    get(task).~ResultType();
+  }
+
+  // template <class TaskQueueTraits>
+  // KOKKOS_INLINE_FUNCTION static
+  // void destroy( TaskNode<TaskQueueTraits>* task )
+  //{ get(task).~ResultType(); }
+};
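+
+// The ptr() arithmetic above relies on the result value living in the last
+// sizeof(ResultType) bytes of the task's single allocation. A stand-alone
+// sketch of that trailing-payload layout (hypothetical Header type, needs
+// <cstdint>; not the Kokkos allocation scheme itself):
+#if 0
+struct Header {
+  std::int32_t alloc_size;  // total bytes in the allocation, header included
+};
+
+template <class Result>
+Result* trailing_result(Header* h) {
+  return reinterpret_cast<Result*>(reinterpret_cast<char*>(h) +
+                                   h->alloc_size - sizeof(Result));
+}
+// Usage: allocate sizeof(Header) + padding + sizeof(double) bytes, then
+// placement-new the result:  new (trailing_result<double>(h)) double(3.14);
+#endif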
+
+template <>
+struct TaskResult<void> {
+  enum : int32_t { size = 0 };
+
+  using reference_type = void;
+
+  template <class TaskQueueTraits>
+  KOKKOS_INLINE_FUNCTION static void* ptr(TaskNode<TaskQueueTraits>* /*task*/) {
+    return nullptr;
+  }
+
+  KOKKOS_INLINE_FUNCTION static void* ptr(TaskBase*) { return nullptr; }
+
+  template <class TaskQueueTraits>
+  KOKKOS_INLINE_FUNCTION static reference_type get(
+      TaskNode<TaskQueueTraits>* /*task*/) { /* Should never be called */
+  }
+
+  KOKKOS_INLINE_FUNCTION static reference_type get(TaskBase*) {}
+
+  KOKKOS_INLINE_FUNCTION static void destroy(TaskBase* /*task*/) {}
+
+  // template <class TaskQueueTraits>
+  // KOKKOS_INLINE_FUNCTION static
+  // void destroy( TaskNode<TaskQueueTraits>* task )
+  //{ }
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_TASKRESULT_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskTeamMember.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_TaskTeamMember.hpp
new file mode 100644 (file)
index 0000000..1d6c766
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_TASKTEAMMEMBER_HPP
+#define KOKKOS_TASKTEAMMEMBER_HPP
+
+//----------------------------------------------------------------------------
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_TaskScheduler_fwd.hpp>
+//----------------------------------------------------------------------------
+
+#include <Kokkos_MemoryPool.hpp>
+
+#include <Kokkos_Future.hpp>
+#include <impl/Kokkos_TaskQueue.hpp>
+#include <impl/Kokkos_SingleTaskQueue.hpp>
+#include <impl/Kokkos_TaskQueueMultiple.hpp>
+#include <impl/Kokkos_TaskPolicyData.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class TeamMember, class Scheduler>
+class TaskTeamMemberAdapter : public TeamMember {
+ private:
+  Scheduler m_scheduler;
+
+ public:
+  //----------------------------------------
+
+  // Forward everything but the Scheduler to the constructor of the TeamMember
+  // type that we're adapting
+  template <typename... Args>
+  KOKKOS_INLINE_FUNCTION explicit TaskTeamMemberAdapter(
+      std::enable_if_t<std::is_constructible<TeamMember, Args...>::value,
+                       Scheduler>
+          arg_scheduler,
+      Args&&... args)  // TODO @tasking @minor DSH noexcept specification
+      : TeamMember(std::forward<Args>(args)...),
+        m_scheduler(
+            std::move(arg_scheduler).get_team_scheduler(this->league_rank())) {}
+
+  // (rule of 6 constructors)
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskTeamMemberAdapter() = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskTeamMemberAdapter(TaskTeamMemberAdapter const&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskTeamMemberAdapter(TaskTeamMemberAdapter&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskTeamMemberAdapter& operator=(TaskTeamMemberAdapter const&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION
+  TaskTeamMemberAdapter& operator=(TaskTeamMemberAdapter&&) = default;
+
+  KOKKOS_DEFAULTED_FUNCTION ~TaskTeamMemberAdapter() = default;
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  Scheduler const& scheduler() const noexcept { return m_scheduler; }
+
+  KOKKOS_INLINE_FUNCTION
+  Scheduler& scheduler() noexcept { return m_scheduler; }
+
+  //----------------------------------------
+};
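+
+// A minimal sketch (hypothetical types, needs <type_traits> and <utility>)
+// of the forwarding-constructor pattern used by TaskTeamMemberAdapter: the
+// enable_if on the first parameter keeps the constructor out of overload
+// resolution unless the remaining arguments can actually construct the base.
+#if 0
+struct Member {
+  explicit Member(int /*rank*/) {}
+};
+
+template <class Base, class Extra>
+struct Adapter : Base {
+  Extra extra;
+  template <class... Args>
+  explicit Adapter(
+      std::enable_if_t<std::is_constructible<Base, Args...>::value, Extra> e,
+      Args&&... args)
+      : Base(std::forward<Args>(args)...), extra(std::move(e)) {}
+};
+
+// Adapter<Member, double> a(1.0, /*rank=*/3);  // Extra first, then Base args
+#endif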
+
+}  // end namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_TASKTEAMMEMBER_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Timer.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Timer.hpp
new file mode 100644 (file)
index 0000000..6edf571
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPLWALLTIME_HPP
+#define KOKKOS_IMPLWALLTIME_HPP
+
+#include <Kokkos_Macros.hpp>
+
+KOKKOS_IMPL_WARNING("This file is deprecated. Use <Kokkos_Timer.hpp> instead.")
+
+#include <Kokkos_Timer.hpp>
+
+#ifdef KOKKOS_ENABLE_DEPRECATED_CODE_3
+namespace Kokkos {
+namespace Impl {
+
+/** \brief  Time since construction.
+ *
+ *   Timer was promoted from the Impl namespace to the Kokkos namespace;
+ *   this file is included only for backwards compatibility.
+ */
+using Timer KOKKOS_DEPRECATED_WITH_COMMENT("Use Kokkos::Timer instead!") =
+    Kokkos::Timer;
+
+}  // namespace Impl
+}  // namespace Kokkos
+#endif
+
+#endif /* #ifndef KOKKOS_IMPLWALLTIME_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Tools.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Tools.hpp
new file mode 100644 (file)
index 0000000..8d6ec64
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+/**
+ * Header file to include all of Kokkos Tooling support
+ */
+
+#ifndef KOKKOS_IMPL_KOKKOS_TOOLS_HPP
+#define KOKKOS_IMPL_KOKKOS_TOOLS_HPP
+
+#include <impl/Kokkos_Profiling.hpp>
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Tools_Generic.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Tools_Generic.hpp
new file mode 100644 (file)
index 0000000..702fc09
--- /dev/null
@@ -0,0 +1,490 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_KOKKOS_TOOLS_GENERIC_HPP
+#define KOKKOS_IMPL_KOKKOS_TOOLS_GENERIC_HPP
+
+#include <impl/Kokkos_Profiling.hpp>
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_ExecPolicy.hpp>
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Tuners.hpp>
+
+namespace Kokkos {
+
+namespace Tools {
+
+namespace Experimental {
+
+namespace Impl {
+
+static std::map<std::string, Kokkos::Tools::Experimental::TeamSizeTuner>
+    team_tuners;
+
+template <int Rank>
+using MDRangeTuningMap =
+    std::map<std::string, Kokkos::Tools::Experimental::MDRangeTuner<Rank>>;
+
+template <int Rank>
+static MDRangeTuningMap<Rank> mdrange_tuners;
+
+// For any policies without a tuning implementation, with a reducer
+template <class ReducerType, class ExecPolicy, class Functor, typename TagType>
+void tune_policy(const size_t, const std::string&, ExecPolicy&, const Functor&,
+                 TagType) {}
+
+// For any policies without a tuning implementation, without a reducer
+template <class ExecPolicy, class Functor, typename TagType>
+void tune_policy(const size_t, const std::string&, ExecPolicy&, const Functor&,
+                 const TagType&) {}
+
+/**
+ * Tuning for parallel_fors and parallel_scans is a fairly simple process.
+ *
+ * Tuning for a parallel_reduce turns out to be a little more complicated.
+ *
+ * If you're tuning a reducer, it might be a complex or a simple reducer
+ * (an example of a simple reducer is one whose join is just "+").
+ *
+ * Unfortunately these two paths are very different in terms of which classes
+ * get instantiated. Thankfully, all of this complexity is encoded in the
+ * ReducerType. If it's a "simple" reducer, this will be Kokkos::InvalidType,
+ * otherwise it'll be something else.
+ *
+ * If the type is complex, for the code to be generally right you _must_
+ * pass an instance of that ReducerType to functions that determine
+ * eligible team sizes. If the type is simple, you can't construct one,
+ * you use the simpler 2-arg formulation of team_size_recommended/max.
+ */
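+
+// A minimal sketch of that dispatch (Kokkos::InvalidType is the marker used
+// in this file; the helper itself is hypothetical and uses C++17
+// 'if constexpr' for brevity):
+#if 0
+template <class ReducerType, class Policy, class Functor>
+int sketch_team_size_max(const Policy& p, const Functor& f) {
+  if constexpr (std::is_same<ReducerType, Kokkos::InvalidType>::value) {
+    // Simple reducer: no instance can be constructed, use the 2-arg overload.
+    return p.team_size_max(f, Kokkos::ParallelReduceTag{});
+  } else {
+    // Complex reducer: build a temporary reducer from a value_type instance.
+    typename ReducerType::value_type v;
+    return p.team_size_max(f, ReducerType(v), Kokkos::ParallelReduceTag{});
+  }
+}
+#endif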
+
+namespace Impl {
+
+struct SimpleTeamSizeCalculator {
+  template <typename Policy, typename Functor, typename Tag>
+  int get_max_team_size(const Policy& policy, const Functor& functor,
+                        const Tag tag) {
+    auto max = policy.team_size_max(functor, tag);
+    return max;
+  }
+  template <typename Policy, typename Functor, typename Tag>
+  int get_recommended_team_size(const Policy& policy, const Functor& functor,
+                                const Tag tag) {
+    auto max = policy.team_size_recommended(functor, tag);
+    return max;
+  }
+  template <typename Policy, typename Functor>
+  int get_mdrange_max_tile_size_product(const Policy& policy,
+                                        const Functor& functor,
+                                        const Kokkos::ParallelForTag&) {
+    using exec_space = typename Policy::execution_space;
+    using driver     = Kokkos::Impl::ParallelFor<Functor, Policy, exec_space>;
+    return driver::max_tile_size_product(policy, functor);
+  }
+  template <typename Policy, typename Functor>
+  int get_mdrange_max_tile_size_product(const Policy& policy,
+                                        const Functor& functor,
+                                        const Kokkos::ParallelReduceTag&) {
+    using exec_space = typename Policy::execution_space;
+    using driver =
+        Kokkos::Impl::ParallelReduce<Functor, Policy, Kokkos::InvalidType,
+                                     exec_space>;
+    return driver::max_tile_size_product(policy, functor);
+  }
+};
+
+// When we have a complex reducer, we need to pass an
+// instance to team_size_recommended/max. Reducers
+// aren't default-constructible, but they are
+// constructible from a reference to an
+// instance of their value_type, so we construct
+// a value_type and a temporary reducer here.
+template <typename ReducerType>
+struct ComplexReducerSizeCalculator {
+  template <typename Policy, typename Functor, typename Tag>
+  int get_max_team_size(const Policy& policy, const Functor& functor,
+                        const Tag tag) {
+    using value_type = typename ReducerType::value_type;
+    value_type value;
+    ReducerType reducer_example = ReducerType(value);
+    return policy.team_size_max(functor, reducer_example, tag);
+  }
+  template <typename Policy, typename Functor, typename Tag>
+  int get_recommended_team_size(const Policy& policy, const Functor& functor,
+                                const Tag tag) {
+    using value_type = typename ReducerType::value_type;
+    value_type value;
+    ReducerType reducer_example = ReducerType(value);
+    return policy.team_size_recommended(functor, reducer_example, tag);
+  }
+  template <typename Policy, typename Functor>
+  int get_mdrange_max_tile_size_product(const Policy& policy,
+                                        const Functor& functor,
+                                        const Kokkos::ParallelReduceTag&) {
+    using exec_space = typename Policy::execution_space;
+    using driver =
+        Kokkos::Impl::ParallelReduce<Functor, Policy, ReducerType, exec_space>;
+    return driver::max_tile_size_product(policy, functor);
+  }
+};
+
+}  // namespace Impl
+
+template <class Tuner, class Functor, class TagType,
+          class TuningPermissionFunctor, class Map, class Policy>
+void generic_tune_policy(const std::string& label_in, Map& map, Policy& policy,
+                         const Functor& functor, const TagType& tag,
+                         const TuningPermissionFunctor& should_tune) {
+  if (should_tune(policy)) {
+    std::string label = label_in;
+    if (label_in.empty()) {
+      using policy_type = std::remove_reference_t<decltype(policy)>;
+      using work_tag    = typename policy_type::work_tag;
+      Kokkos::Impl::ParallelConstructName<Functor, work_tag> name(label);
+      label = name.get();
+    }
+    auto tuner_iter = [&]() {
+      auto my_tuner = map.find(label);
+      if (my_tuner == map.end()) {
+        return (map.emplace(label, Tuner(label, policy, functor, tag,
+                                         Impl::SimpleTeamSizeCalculator{}))
+                    .first);
+      }
+      return my_tuner;
+    }();
+    tuner_iter->second.tune(policy);
+  }
+}
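+
+// The immediately-invoked lambda above is a find-or-emplace idiom: look the
+// label up once, construct the tuner only on a miss, and get back a valid
+// iterator either way. The same shape with a plain std::map and a
+// hypothetical Tuner type (needs <map> and <string>):
+#if 0
+void sketch_tune(const std::string& label) {
+  static std::map<std::string, Tuner> cache;
+  auto iter = [&] {
+    auto it = cache.find(label);
+    if (it == cache.end())
+      return cache.emplace(label, Tuner(label)).first;  // construct on miss
+    return it;
+  }();
+  iter->second.tune();  // iterator is valid whether or not we just inserted
+}
+#endif
+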
+template <class Tuner, class ReducerType, class Functor, class TagType,
+          class TuningPermissionFunctor, class Map, class Policy>
+void generic_tune_policy(const std::string& label_in, Map& map, Policy& policy,
+                         const Functor& functor, const TagType& tag,
+                         const TuningPermissionFunctor& should_tune) {
+  if (should_tune(policy)) {
+    std::string label = label_in;
+    if (label_in.empty()) {
+      using policy_type = std::remove_reference_t<decltype(policy)>;
+      using work_tag    = typename policy_type::work_tag;
+      Kokkos::Impl::ParallelConstructName<Functor, work_tag> name(label);
+      label = name.get();
+    }
+    auto tuner_iter = [&]() {
+      auto my_tuner = map.find(label);
+      if (my_tuner == map.end()) {
+        return (map.emplace(
+                       label,
+                       Tuner(label, policy, functor, tag,
+                             Impl::ComplexReducerSizeCalculator<ReducerType>{}))
+                    .first);
+      }
+      return my_tuner;
+    }();
+    tuner_iter->second.tune(policy);
+  }
+}
+
+// tune a TeamPolicy, without reducer
+template <class Functor, class TagType, class... Properties>
+void tune_policy(const size_t /**tuning_context*/, const std::string& label_in,
+                 Kokkos::TeamPolicy<Properties...>& policy,
+                 const Functor& functor, const TagType& tag) {
+  generic_tune_policy<Experimental::TeamSizeTuner>(
+      label_in, team_tuners, policy, functor, tag,
+      [](const Kokkos::TeamPolicy<Properties...>& candidate_policy) {
+        return (candidate_policy.impl_auto_team_size() ||
+                candidate_policy.impl_auto_vector_length());
+      });
+}
+
+// tune a TeamPolicy, with reducer
+template <class ReducerType, class Functor, class TagType, class... Properties>
+void tune_policy(const size_t /**tuning_context*/, const std::string& label_in,
+                 Kokkos::TeamPolicy<Properties...>& policy,
+                 const Functor& functor, const TagType& tag) {
+  generic_tune_policy<Experimental::TeamSizeTuner, ReducerType>(
+      label_in, team_tuners, policy, functor, tag,
+      [](const Kokkos::TeamPolicy<Properties...>& candidate_policy) {
+        return (candidate_policy.impl_auto_team_size() ||
+                candidate_policy.impl_auto_vector_length());
+      });
+}
+
+// tune a MDRangePolicy, without reducer
+template <class Functor, class TagType, class... Properties>
+void tune_policy(const size_t /**tuning_context*/, const std::string& label_in,
+                 Kokkos::MDRangePolicy<Properties...>& policy,
+                 const Functor& functor, const TagType& tag) {
+  using Policy              = Kokkos::MDRangePolicy<Properties...>;
+  static constexpr int rank = Policy::rank;
+  generic_tune_policy<Experimental::MDRangeTuner<rank>>(
+      label_in, mdrange_tuners<rank>, policy, functor, tag,
+      [](const Policy& candidate_policy) {
+        return candidate_policy.impl_tune_tile_size();
+      });
+}
+
+// tune a MDRangePolicy, with reducer
+template <class ReducerType, class Functor, class TagType, class... Properties>
+void tune_policy(const size_t /**tuning_context*/, const std::string& label_in,
+                 Kokkos::MDRangePolicy<Properties...>& policy,
+                 const Functor& functor, const TagType& tag) {
+  using Policy              = Kokkos::MDRangePolicy<Properties...>;
+  static constexpr int rank = Policy::rank;
+  generic_tune_policy<Experimental::MDRangeTuner<rank>, ReducerType>(
+      label_in, mdrange_tuners<rank>, policy, functor, tag,
+      [](const Policy& candidate_policy) {
+        return candidate_policy.impl_tune_tile_size();
+      });
+}
+
+template <class ReducerType>
+struct ReductionSwitcher {
+  template <class Functor, class TagType, class ExecPolicy>
+  static void tune(const size_t tuning_context, const std::string& label,
+                   ExecPolicy& policy, const Functor& functor,
+                   const TagType& tag) {
+    if (Kokkos::tune_internals()) {
+      tune_policy<ReducerType>(tuning_context, label, policy, functor, tag);
+    }
+  }
+};
+
+template <>
+struct ReductionSwitcher<Kokkos::InvalidType> {
+  template <class Functor, class TagType, class ExecPolicy>
+  static void tune(const size_t tuning_context, const std::string& label,
+                   ExecPolicy& policy, const Functor& functor,
+                   const TagType& tag) {
+    if (Kokkos::tune_internals()) {
+      tune_policy(tuning_context, label, policy, functor, tag);
+    }
+  }
+};
+
+template <class Tuner, class Functor, class TagType,
+          class TuningPermissionFunctor, class Map, class Policy>
+void generic_report_results(const std::string& label_in, Map& map,
+                            Policy& policy, const Functor&, const TagType&,
+                            const TuningPermissionFunctor& should_tune) {
+  if (should_tune(policy)) {
+    std::string label = label_in;
+    if (label_in.empty()) {
+      using policy_type = std::remove_reference_t<decltype(policy)>;
+      using work_tag    = typename policy_type::work_tag;
+      Kokkos::Impl::ParallelConstructName<Functor, work_tag> name(label);
+      label = name.get();
+    }
+    auto& tuner = map[label];
+    tuner.end();
+  }
+}
+
+// report results for a policy type we don't tune (do nothing)
+template <class ExecPolicy, class Functor, typename TagType>
+void report_policy_results(const size_t, const std::string&, ExecPolicy&,
+                           const Functor&, const TagType&) {}
+
+// report results for a TeamPolicy
+template <class Functor, class TagType, class... Properties>
+void report_policy_results(const size_t /**tuning_context*/,
+                           const std::string& label_in,
+                           Kokkos::TeamPolicy<Properties...>& policy,
+                           const Functor& functor, const TagType& tag) {
+  generic_report_results<Experimental::TeamSizeTuner>(
+      label_in, team_tuners, policy, functor, tag,
+      [](const Kokkos::TeamPolicy<Properties...>& candidate_policy) {
+        return (candidate_policy.impl_auto_team_size() ||
+                candidate_policy.impl_auto_vector_length());
+      });
+}
+
+// report results for an MDRangePolicy
+template <class Functor, class TagType, class... Properties>
+void report_policy_results(const size_t /**tuning_context*/,
+                           const std::string& label_in,
+                           Kokkos::MDRangePolicy<Properties...>& policy,
+                           const Functor& functor, const TagType& tag) {
+  using Policy              = Kokkos::MDRangePolicy<Properties...>;
+  static constexpr int rank = Policy::rank;
+  generic_report_results<Experimental::MDRangeTuner<rank>>(
+      label_in, mdrange_tuners<rank>, policy, functor, tag,
+      [](const Policy& candidate_policy) {
+        return candidate_policy.impl_tune_tile_size();
+      });
+}
+
+}  // namespace Impl
+
+}  // namespace Experimental
+
+namespace Impl {
+
+template <class ExecPolicy, class FunctorType>
+void begin_parallel_for(ExecPolicy& policy, FunctorType& functor,
+                        const std::string& label, uint64_t& kpID) {
+  if (Kokkos::Tools::profileLibraryLoaded()) {
+    Kokkos::Impl::ParallelConstructName<FunctorType,
+                                        typename ExecPolicy::work_tag>
+        name(label);
+    Kokkos::Tools::beginParallelFor(
+        name.get(), Kokkos::Profiling::Experimental::device_id(policy.space()),
+        &kpID);
+  }
+#ifdef KOKKOS_ENABLE_TUNING
+  size_t context_id = Kokkos::Tools::Experimental::get_new_context_id();
+  if (Kokkos::tune_internals()) {
+    Experimental::Impl::tune_policy(context_id, label, policy, functor,
+                                    Kokkos::ParallelForTag{});
+  }
+#else
+  (void)functor;
+#endif
+}
+
+template <class ExecPolicy, class FunctorType>
+void end_parallel_for(ExecPolicy& policy, FunctorType& functor,
+                      const std::string& label, uint64_t& kpID) {
+  if (Kokkos::Tools::profileLibraryLoaded()) {
+    Kokkos::Tools::endParallelFor(kpID);
+  }
+#ifdef KOKKOS_ENABLE_TUNING
+  size_t context_id = Kokkos::Tools::Experimental::get_current_context_id();
+  if (Kokkos::tune_internals()) {
+    Experimental::Impl::report_policy_results(
+        context_id, label, policy, functor, Kokkos::ParallelForTag{});
+  }
+#else
+  (void)policy;
+  (void)functor;
+  (void)label;
+#endif
+}
+
+template <class ExecPolicy, class FunctorType>
+void begin_parallel_scan(ExecPolicy& policy, FunctorType& functor,
+                         const std::string& label, uint64_t& kpID) {
+  if (Kokkos::Tools::profileLibraryLoaded()) {
+    Kokkos::Impl::ParallelConstructName<FunctorType,
+                                        typename ExecPolicy::work_tag>
+        name(label);
+    Kokkos::Tools::beginParallelScan(
+        name.get(), Kokkos::Profiling::Experimental::device_id(policy.space()),
+        &kpID);
+  }
+#ifdef KOKKOS_ENABLE_TUNING
+  size_t context_id = Kokkos::Tools::Experimental::get_new_context_id();
+  if (Kokkos::tune_internals()) {
+    Experimental::Impl::tune_policy(context_id, label, policy, functor,
+                                    Kokkos::ParallelScanTag{});
+  }
+#else
+  (void)functor;
+#endif
+}
+
+template <class ExecPolicy, class FunctorType>
+void end_parallel_scan(ExecPolicy& policy, FunctorType& functor,
+                       const std::string& label, uint64_t& kpID) {
+  if (Kokkos::Tools::profileLibraryLoaded()) {
+    Kokkos::Tools::endParallelScan(kpID);
+  }
+#ifdef KOKKOS_ENABLE_TUNING
+  size_t context_id = Kokkos::Tools::Experimental::get_current_context_id();
+  if (Kokkos::tune_internals()) {
+    Experimental::Impl::report_policy_results(
+        context_id, label, policy, functor, Kokkos::ParallelScanTag{});
+  }
+#else
+  (void)policy;
+  (void)functor;
+  (void)label;
+#endif
+}
+
+template <class ReducerType, class ExecPolicy, class FunctorType>
+void begin_parallel_reduce(ExecPolicy& policy, FunctorType& functor,
+                           const std::string& label, uint64_t& kpID) {
+  if (Kokkos::Tools::profileLibraryLoaded()) {
+    Kokkos::Impl::ParallelConstructName<FunctorType,
+                                        typename ExecPolicy::work_tag>
+        name(label);
+    Kokkos::Tools::beginParallelReduce(
+        name.get(), Kokkos::Profiling::Experimental::device_id(policy.space()),
+        &kpID);
+  }
+#ifdef KOKKOS_ENABLE_TUNING
+  size_t context_id = Kokkos::Tools::Experimental::get_new_context_id();
+  Experimental::Impl::ReductionSwitcher<ReducerType>::tune(
+      context_id, label, policy, functor, Kokkos::ParallelReduceTag{});
+#else
+  (void)functor;
+#endif
+}
+
+template <class ReducerType, class ExecPolicy, class FunctorType>
+void end_parallel_reduce(ExecPolicy& policy, FunctorType& functor,
+                         const std::string& label, uint64_t& kpID) {
+  if (Kokkos::Tools::profileLibraryLoaded()) {
+    Kokkos::Tools::endParallelReduce(kpID);
+  }
+#ifdef KOKKOS_ENABLE_TUNING
+  size_t context_id = Kokkos::Tools::Experimental::get_current_context_id();
+  if (Kokkos::tune_internals()) {
+    Experimental::Impl::report_policy_results(
+        context_id, label, policy, functor, Kokkos::ParallelReduceTag{});
+  }
+#else
+  (void)policy;
+  (void)functor;
+  (void)label;
+#endif
+}
+
+}  // end namespace Impl
+
+}  // namespace Tools
+
+}  // namespace Kokkos
+
+#endif  // header guard
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Traits.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Traits.hpp
new file mode 100644 (file)
index 0000000..38edc11
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOSTRAITS_HPP
+#define KOKKOSTRAITS_HPP
+
+#include <cstddef>
+#include <cstdint>
+#include <Kokkos_Macros.hpp>
+#include <impl/Kokkos_BitOps.hpp>
+#include <string>
+#include <type_traits>
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+// Help with C++11 variadic argument packs
+
+template <unsigned I, typename... Pack>
+struct get_type {
+  using type = void;
+};
+
+template <typename T, typename... Pack>
+struct get_type<0, T, Pack...> {
+  using type = T;
+};
+
+template <unsigned I, typename T, typename... Pack>
+struct get_type<I, T, Pack...> {
+  using type = typename get_type<I - 1, Pack...>::type;
+};
+
+template <typename T, typename... Pack>
+struct has_type {
+  enum : bool { value = false };
+};
+
+template <typename T, typename S, typename... Pack>
+struct has_type<T, S, Pack...> {
+ private:
+  enum { self_value = std::is_same<T, S>::value };
+
+  using next = has_type<T, Pack...>;
+
+  static_assert(
+      !(self_value && next::value),
+      "Error: more than one member of the argument pack matches the type");
+
+ public:
+  enum : bool { value = self_value || next::value };
+};
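+
+// Usage sketch for the two pack helpers above; these assertions hold by
+// construction:
+#if 0
+static_assert(
+    std::is_same<get_type<1, int, float, double>::type, float>::value,
+    "get_type extracts the pack element at index 1");
+static_assert(has_type<double, int, float, double>::value,
+              "has_type finds double in the pack");
+static_assert(!has_type<char, int, float, double>::value,
+              "has_type rejects a type that is not in the pack");
+#endif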
+
+template <typename DefaultType, template <typename> class Condition,
+          typename... Pack>
+struct has_condition {
+  enum : bool { value = false };
+  using type = DefaultType;
+};
+
+template <typename DefaultType, template <typename> class Condition, typename S,
+          typename... Pack>
+struct has_condition<DefaultType, Condition, S, Pack...> {
+ private:
+  enum { self_value = Condition<S>::value };
+
+  using next = has_condition<DefaultType, Condition, Pack...>;
+
+  static_assert(
+      !(self_value && next::value),
+      "Error: more than one member of the argument pack satisfies condition");
+
+ public:
+  enum : bool { value = self_value || next::value };
+
+  using type = std::conditional_t<self_value, S, typename next::type>;
+};
+
+template <class... Args>
+struct are_integral {
+  enum : bool { value = true };
+};
+
+template <typename T, class... Args>
+struct are_integral<T, Args...> {
+  enum {
+    value =
+        // Accept std::is_integral OR std::is_enum as an integral value
+        // since a simple enum value is automatically convertible to an
+        // integral value.
+    (std::is_integral<T>::value || std::is_enum<T>::value) &&
+    are_integral<Args...>::value
+  };
+};
+
+//----------------------------------------------------------------------------
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+// Other traits
+
+namespace Kokkos {
+namespace Impl {
+
+//----------------------------------------------------------------------------
+// if_
+
+template <bool Cond, typename TrueType, typename FalseType>
+struct if_c {
+  enum : bool { value = Cond };
+
+  using type = FalseType;
+
+  using value_type = std::remove_const_t<std::remove_reference_t<type>>;
+
+  using const_value_type = std::add_const_t<value_type>;
+
+  static KOKKOS_INLINE_FUNCTION const_value_type& select(const_value_type& v) {
+    return v;
+  }
+
+  static KOKKOS_INLINE_FUNCTION value_type& select(value_type& v) { return v; }
+
+  template <class T>
+  static KOKKOS_INLINE_FUNCTION value_type& select(const T&) {
+    value_type* ptr(0);
+    return *ptr;
+  }
+
+  template <class T>
+  static KOKKOS_INLINE_FUNCTION const_value_type& select(const T&,
+                                                         const_value_type& v) {
+    return v;
+  }
+
+  template <class T>
+  static KOKKOS_INLINE_FUNCTION value_type& select(const T&, value_type& v) {
+    return v;
+  }
+};
+
+template <typename TrueType, typename FalseType>
+struct if_c<true, TrueType, FalseType> {
+  enum : bool { value = true };
+
+  using type = TrueType;
+
+  using value_type = std::remove_const_t<std::remove_reference_t<type>>;
+
+  using const_value_type = std::add_const_t<value_type>;
+
+  static KOKKOS_INLINE_FUNCTION const_value_type& select(const_value_type& v) {
+    return v;
+  }
+
+  static KOKKOS_INLINE_FUNCTION value_type& select(value_type& v) { return v; }
+
+  template <class T>
+  static KOKKOS_INLINE_FUNCTION value_type& select(const T&) {
+    value_type* ptr(0);
+    return *ptr;
+  }
+
+  template <class F>
+  static KOKKOS_INLINE_FUNCTION const_value_type& select(const_value_type& v,
+                                                         const F&) {
+    return v;
+  }
+
+  template <class F>
+  static KOKKOS_INLINE_FUNCTION value_type& select(value_type& v, const F&) {
+    return v;
+  }
+};
+
+template <typename TrueType>
+struct if_c<false, TrueType, void> {
+  enum : bool { value = false };
+
+  using type       = void;
+  using value_type = void;
+};
+
+template <typename FalseType>
+struct if_c<true, void, FalseType> {
+  enum : bool { value = true };
+
+  using type       = void;
+  using value_type = void;
+};
+
+//----------------------------------------------------------------------------
+// These 'constexpr' functions can be used as
+// both regular functions and meta-functions.
+
+/**\brief  There exists integral 'k' such that N = 2^k */
+KOKKOS_INLINE_FUNCTION
+constexpr bool is_integral_power_of_two(const size_t N) {
+  return (0 < N) && (0 == (N & (N - 1)));
+}
+
+/**\brief  Return integral 'k' such that N = 2^k, assuming valid.  */
+KOKKOS_INLINE_FUNCTION
+constexpr unsigned integral_power_of_two_assume_valid(const size_t N) {
+  return N == 1 ? 0 : 1 + integral_power_of_two_assume_valid(N >> 1);
+}
+
+/**\brief  Return integral 'k' such that N = 2^k, if it exists.
+ *         If no such 'k' exists, return ~0u.
+ */
+KOKKOS_INLINE_FUNCTION
+constexpr unsigned integral_power_of_two(const size_t N) {
+  return is_integral_power_of_two(N) ? integral_power_of_two_assume_valid(N)
+                                     : ~0u;
+}
+
+/** \brief  If power of two then return power,
+ *          otherwise return ~0u.
+ */
+KOKKOS_FORCEINLINE_FUNCTION
+unsigned power_of_two_if_valid(const unsigned N) {
+  unsigned p = ~0u;
+  if (is_integral_power_of_two(N)) {
+    p = bit_scan_forward(N);
+  }
+  return p;
+}
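+
+// Usage sketch for the power-of-two helpers above:
+#if 0
+static_assert(is_integral_power_of_two(64), "64 == 2^6");
+static_assert(!is_integral_power_of_two(48), "48 is not a power of two");
+static_assert(integral_power_of_two(64) == 6u, "recovers the exponent");
+static_assert(integral_power_of_two(48) == ~0u, "invalid input yields ~0u");
+#endif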
+
+//----------------------------------------------------------------------------
+
+template <typename T, T v, bool NonZero = (v != T(0))>
+struct integral_nonzero_constant {
+  // Declaration of 'static const' causes an unresolved linker symbol in debug
+  // static const T value = v ;
+  enum { value = T(v) };
+  using value_type = T;
+  using type       = integral_nonzero_constant<T, v>;
+  KOKKOS_INLINE_FUNCTION integral_nonzero_constant(const T&) {}
+};
+
+template <typename T, T zero>
+struct integral_nonzero_constant<T, zero, false> {
+  const T value;
+  using value_type = T;
+  using type       = integral_nonzero_constant<T, 0>;
+  KOKKOS_INLINE_FUNCTION integral_nonzero_constant(const T& v) : value(v) {}
+};
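+
+// Usage sketch: with a nonzero template parameter the value is a compile-time
+// enum and the struct is empty; with zero it is carried at run time through
+// the constructor argument.
+#if 0
+integral_nonzero_constant<int, 8> fixed(8);    // .value is the enum constant 8
+integral_nonzero_constant<int, 0> dynamic(5);  // .value == 5, stored at run time
+static_assert(integral_nonzero_constant<int, 8>::value == 8, "compile time");
+#endif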
+
+//----------------------------------------------------------------------------
+
+template <class T>
+struct make_all_extents_into_pointers {
+  using type = T;
+};
+
+template <class T, unsigned N>
+struct make_all_extents_into_pointers<T[N]> {
+  using type = typename make_all_extents_into_pointers<T>::type*;
+};
+
+template <class T>
+struct make_all_extents_into_pointers<T*> {
+  using type = typename make_all_extents_into_pointers<T>::type*;
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOSTRAITS_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Utilities.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Utilities.hpp
new file mode 100644 (file)
index 0000000..37b7410
--- /dev/null
@@ -0,0 +1,269 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CORE_IMPL_UTILITIES_HPP
+#define KOKKOS_CORE_IMPL_UTILITIES_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <cstdint>
+#include <type_traits>
+#include <initializer_list>  // in-order comma operator fold emulation
+#include <utility>           // integer_sequence and friends
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename T>
+struct identity {
+  using type = T;
+};
+
+template <typename T>
+using identity_t = typename identity<T>::type;
+
+template <typename... Is>
+struct always_true : std::true_type {};
+
+#if defined(__cpp_lib_void_t)
+// since C++17
+using std::void_t;
+#else
+template <class...>
+using void_t = void;
+#endif
+
+//==============================================================================
+// <editor-fold desc="remove_cvref_t"> {{{1
+
+#if defined(__cpp_lib_remove_cvref)
+// since C++20
+using std::remove_cvref;
+using std::remove_cvref_t;
+#else
+template <class T>
+struct remove_cvref {
+  using type = std::remove_cv_t<std::remove_reference_t<T>>;
+};
+
+template <class T>
+using remove_cvref_t = typename remove_cvref<T>::type;
+#endif
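+
+// Illustration: the reference is stripped first, then the cv-qualifiers:
+//   static_assert(std::is_same<remove_cvref_t<const int&>, int>::value, "");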
+
+// </editor-fold> end remove_cvref_t }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="is_specialization_of"> {{{1
+
+template <class Type, template <class...> class Template, class Enable = void>
+struct is_specialization_of : std::false_type {};
+
+template <template <class...> class Template, class... Args>
+struct is_specialization_of<Template<Args...>, Template> : std::true_type {};
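+
+// Illustration: detects whether a type is an instance of a class template:
+//   static_assert(
+//       is_specialization_of<std::pair<int, char>, std::pair>::value, "");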
+
+// </editor-fold> end is_specialization_of }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="Folding emulation"> {{{1
+
+// acts like void for comma fold emulation
+struct _fold_comma_emulation_return {};
+
+template <class... Ts>
+constexpr KOKKOS_INLINE_FUNCTION _fold_comma_emulation_return
+emulate_fold_comma_operator(Ts&&...) noexcept {
+  return _fold_comma_emulation_return{};
+}
+
+#define KOKKOS_IMPL_FOLD_COMMA_OPERATOR(expr)                                \
+  ::Kokkos::Impl::emulate_fold_comma_operator(                               \
+      ::std::initializer_list<::Kokkos::Impl::_fold_comma_emulation_return>{ \
+          ((expr), ::Kokkos::Impl::_fold_comma_emulation_return{})...})
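+
+// Illustration: expands an expression over a parameter pack where a C++17
+// comma fold '(f(ts), ...)' would otherwise be used ('f' stands for any
+// callable taking one element):
+//   template <class... Ts>
+//   void call_all(Ts const&... ts) {
+//     KOKKOS_IMPL_FOLD_COMMA_OPERATOR(f(ts));  // expanded once per element
+//   }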
+
+// </editor-fold> end Folding emulation }}}1
+//==============================================================================
+
+//==============================================================================
+// destruct_delete is a unique_ptr deleter for objects
+// created by placement new into already allocated memory
+// by only calling the destructor on the object.
+//
+// Because unique_ptr never calls its deleter with a nullptr value,
+// there is no need to check whether p == nullptr.
+//
+// Note:  This differs in interface from std::default_delete in that the
+// function call operator is templated instead of the class, to make
+// it easier to use and disallow specialization.
+struct destruct_delete {
+  template <typename T>
+  KOKKOS_INLINE_FUNCTION constexpr void operator()(T* p) const noexcept {
+    p->~T();
+  }
+};
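+
+// Illustration: owning a placement-new'ed object whose storage outlives it;
+// the deleter runs only ~T() and never deallocates (for some type T):
+//   alignas(T) unsigned char buffer[sizeof(T)];
+//   std::unique_ptr<T, destruct_delete> p(new (buffer) T());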
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="type_list"> {{{1
+
+// An intentionally uninstantiable type_list for metaprogramming purposes
+template <class...>
+struct type_list;
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="type_list_remove_first"> {{{2
+
+// Currently linear complexity; if we use this a lot, maybe make it better?
+
+template <class Entry, class InList, class OutList>
+struct _type_list_remove_first_impl;
+
+template <class Entry, class T, class... Ts, class... OutTs>
+struct _type_list_remove_first_impl<Entry, type_list<T, Ts...>,
+                                    type_list<OutTs...>>
+    : _type_list_remove_first_impl<Entry, type_list<Ts...>,
+                                   type_list<OutTs..., T>> {};
+
+template <class Entry, class... Ts, class... OutTs>
+struct _type_list_remove_first_impl<Entry, type_list<Entry, Ts...>,
+                                    type_list<OutTs...>>
+    : _type_list_remove_first_impl<Entry, type_list<>,
+                                   type_list<OutTs..., Ts...>> {};
+
+template <class Entry, class... OutTs>
+struct _type_list_remove_first_impl<Entry, type_list<>, type_list<OutTs...>>
+    : identity<type_list<OutTs...>> {};
+
+template <class Entry, class List>
+struct type_list_remove_first
+    : _type_list_remove_first_impl<Entry, List, type_list<>> {};
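+
+// Illustration: only the first occurrence of Entry is removed:
+//   static_assert(
+//       std::is_same<
+//           typename type_list_remove_first<int,
+//                                           type_list<char, int, int>>::type,
+//           type_list<char, int>>::value, "");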
+
+// </editor-fold> end type_list_remove_first }}}2
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="type_list_any"> {{{2
+
+template <template <class> class UnaryPred, class List>
+struct type_list_any;
+
+#ifdef KOKKOS_ENABLE_CXX17
+template <template <class> class UnaryPred, class... Ts>
+struct type_list_any<UnaryPred, type_list<Ts...>>
+    : std::bool_constant<(UnaryPred<Ts>::value || ...)> {};
+#else
+template <template <class> class UnaryPred, class T, class... Ts>
+struct type_list_any<UnaryPred, type_list<T, Ts...>> {
+  using type = typename std::conditional_t<
+      UnaryPred<T>::value, std::true_type,
+      type_list_any<UnaryPred, type_list<Ts...>>>::type;
+  static constexpr auto value = type::value;
+};
+
+template <template <class> class UnaryPred>
+struct type_list_any<UnaryPred, type_list<>> : std::false_type {};
+
+#endif
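+
+// Illustration: true iff the predicate holds for at least one list element:
+//   static_assert(
+//       type_list_any<std::is_pointer, type_list<int, char*>>::value, "");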
+
+// </editor-fold> end type_list_any }}}2
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="concat_type_list"> {{{2
+//  concat_type_list combines the types from multiple type_lists into one
+
+// forward declaration
+template <typename... T>
+struct concat_type_list;
+
+// alias
+template <typename... T>
+using concat_type_list_t = typename concat_type_list<T...>::type;
+
+// final instantiation
+template <typename... T>
+struct concat_type_list<type_list<T...>> {
+  using type = type_list<T...>;
+};
+
+// combine consecutive type_lists
+template <typename... T, typename... U, typename... Tail>
+struct concat_type_list<type_list<T...>, type_list<U...>, Tail...>
+    : concat_type_list<type_list<T..., U...>, Tail...> {};
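+
+// Illustration: lists concatenate left to right and empty lists vanish:
+//   static_assert(
+//       std::is_same<concat_type_list_t<type_list<int>, type_list<>,
+//                                       type_list<char, float>>,
+//                    type_list<int, char, float>>::value, "");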
+// </editor-fold> end concat_type_list }}}2
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="filter_type_list"> {{{2
+//  filter_type_list generates a type_list of the types T which satisfy
+//  PredicateT<T>::value == ValueT
+
+template <template <typename> class PredicateT, typename TypeListT,
+          bool ValueT = true>
+struct filter_type_list;
+
+template <template <typename> class PredicateT, typename... T, bool ValueT>
+struct filter_type_list<PredicateT, type_list<T...>, ValueT> {
+  using type =
+      concat_type_list_t<std::conditional_t<PredicateT<T>::value == ValueT,
+                                            type_list<T>, type_list<>>...>;
+};
+
+template <template <typename> class PredicateT, typename T, bool ValueT = true>
+using filter_type_list_t =
+    typename filter_type_list<PredicateT, T, ValueT>::type;
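+
+// Illustration: keeps exactly the types for which the predicate yields ValueT:
+//   static_assert(
+//       std::is_same<filter_type_list_t<std::is_pointer,
+//                                       type_list<int, char*, void*>>,
+//                    type_list<char*, void*>>::value, "");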
+
+// </editor-fold> end filter_type_list }}}2
+//------------------------------------------------------------------------------
+
+// </editor-fold> end type_list }}}1
+//==============================================================================
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif  // KOKKOS_CORE_IMPL_UTILITIES_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_VLAEmulation.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_VLAEmulation.hpp
new file mode 100644 (file)
index 0000000..6773263
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_VLAEMULATION_HPP
+#define KOKKOS_IMPL_VLAEMULATION_HPP
+
+#include <Kokkos_Macros.hpp>
+#if defined(KOKKOS_ENABLE_TASKDAG)
+
+#include <Kokkos_Core_fwd.hpp>
+
+#include <impl/Kokkos_Error.hpp>  // KOKKOS_EXPECTS
+
+#include <type_traits>  // std::is_abstract<>, ...
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class Derived, class VLAValueType, class EntryCountType = int32_t>
+struct ObjectWithVLAEmulation;
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+/** @brief Attorney to enable private CRTP inheritance from
+ * ObjectWithVLAEmulation
+ */
+struct VLAEmulationAccess {
+ private:
+  template <class, class, class>
+  friend struct ObjectWithVLAEmulation;
+
+  template <class Derived, class VLAValueType, class EntryCountType>
+  KOKKOS_FORCEINLINE_FUNCTION static constexpr Derived* _cast_to_derived(
+      ObjectWithVLAEmulation<Derived, VLAValueType, EntryCountType>*
+          base) noexcept {
+    return static_cast<Derived*>(base);
+  }
+
+  template <class Derived, class VLAValueType, class EntryCountType>
+  KOKKOS_FORCEINLINE_FUNCTION static constexpr Derived const* _cast_to_derived(
+      ObjectWithVLAEmulation<Derived, VLAValueType, EntryCountType> const*
+          base) noexcept {
+    return static_cast<Derived const*>(base);
+  }
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+/** \brief A CRTP base class for a type that includes a variable-length array by
+ * allocation
+ *
+ *  The storage for the derived type must be allocated manually and the objects
+ *  (both derived type and VLA objects) must be constructed with placement new.
+ *  Obviously, this can't be done for objects on the stack.
+ *
+ *  Note: Though most uses of this currently delete the copy and move
+ * constructor in the `Derived` type, this type is intended to have value
+ * semantics.
+ *
+ *  \todo @documentation elaborate on implications of value semantics for this
+ * class template
+ *
+ */
+template <class Derived, class VLAValueType,
+          class EntryCountType /* = int32_t */
+          >
+struct ObjectWithVLAEmulation {
+ public:
+  using object_type          = Derived;
+  using vla_value_type       = VLAValueType;
+  using vla_entry_count_type = EntryCountType;
+
+  using iterator       = VLAValueType*;
+  using const_iterator = std::add_const_t<VLAValueType>*;
+
+  // TODO @tasking @minor DSH require that Derived be marked final? (note that
+  // std::is_final is C++14)
+  // TODO @tasking @minor DSH delete non-placement operator new for Derived
+  // type?
+
+ private:
+  vla_entry_count_type m_num_entries;
+
+  // CRTP boilerplate
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  /* constexpr */
+  Derived* _this() noexcept {
+    return VLAEmulationAccess::_cast_to_derived(this);
+  }
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  /* constexpr */
+  Derived const* _this() const noexcept {
+    return VLAEmulationAccess::_cast_to_derived(this);
+  }
+
+  // Note: can't be constexpr because of reinterpret_cast
+  KOKKOS_FORCEINLINE_FUNCTION
+  /* constexpr */
+  vla_value_type* _vla_pointer() noexcept {
+    // The data starts right after the aligned storage of Derived
+    return reinterpret_cast<vla_value_type*>(_this() + 1);
+  }
+
+  // Note: can't be constexpr because of reinterpret_cast
+  KOKKOS_FORCEINLINE_FUNCTION
+  /* constexpr */
+  vla_value_type const* _vla_pointer() const noexcept {
+    // The data starts right after the aligned storage of Derived
+    return reinterpret_cast<vla_value_type const*>(_this() + 1);
+  }
+
+ public:
+  KOKKOS_INLINE_FUNCTION
+  static /* constexpr */ size_t required_allocation_size(
+      vla_entry_count_type num_vla_entries) {
+    KOKKOS_EXPECTS(num_vla_entries >= 0);
+    return sizeof(Derived) + num_vla_entries * sizeof(VLAValueType);
+  }
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Constructors, destructor, and assignment"> {{{2
+
+  // TODO @tasking @optimization DSH specialization for trivially constructible
+  // VLAValueType?
+  // TODO @tasking @minor DSH SFINAE-out this constructor for non-default
+  // constructible vla_value_types
+  KOKKOS_INLINE_FUNCTION
+  explicit ObjectWithVLAEmulation(vla_entry_count_type num_entries) noexcept(
+      noexcept(vla_value_type()))
+      : m_num_entries(num_entries) {
+    // Note: We can't do this at class scope because it unnecessarily requires
+    // object_type to be a complete type
+    static_assert(alignof(object_type) >= alignof(vla_value_type),
+                  "Can't append emulated variable length array of type with "
+                  "greater alignment than "
+                  "the type to which the VLA is being appended");
+
+    // Note: We can't do this at class scope because it unnecessarily requires
+    // vla_value_type to be a complete type
+    static_assert(!std::is_abstract<vla_value_type>::value,
+                  "Can't use abstract type with VLA emulation");
+
+    KOKKOS_EXPECTS(num_entries >= 0);
+    for (vla_entry_count_type i = 0; i < m_num_entries; ++i) {
+      new (_vla_pointer() + i) vla_value_type();
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  ~ObjectWithVLAEmulation() {
+    for (auto&& value : *this) {
+      value.~vla_value_type();
+    }
+  }
+
+  // TODO @tasking @new_feature DSH constrained analogs for move and copy ctors
+  // and assignment ops
+  // TODO @tasking @new_feature DSH forwarding in_place constructor
+  // TODO @tasking @new_feature DSH initializer_list constructor?
+
+  // </editor-fold> end Constructors, destructor, and assignment }}}2
+  //----------------------------------------------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr EntryCountType n_vla_entries() const noexcept {
+    return m_num_entries;
+  }
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Accessing the object and the VLA values"> {{{2
+
+  KOKKOS_INLINE_FUNCTION
+  object_type& object() & { return static_cast<Derived&>(*this); }
+
+  KOKKOS_INLINE_FUNCTION
+  object_type const& object() const& {
+    return static_cast<Derived const&>(*this);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  object_type&& object() && { return static_cast<Derived&&>(*this); }
+
+  KOKKOS_INLINE_FUNCTION
+  vla_value_type& vla_value_at(vla_entry_count_type n) & {
+    KOKKOS_EXPECTS(n < n_vla_entries());
+    return _vla_pointer()[n];
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  vla_value_type const& vla_value_at(vla_entry_count_type n) const& {
+    KOKKOS_EXPECTS(n < n_vla_entries());
+    return _vla_pointer()[n];
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  vla_value_type& vla_value_at(vla_entry_count_type n) && {
+    KOKKOS_EXPECTS(n < n_vla_entries());
+    return _vla_pointer()[n];
+  }
+
+  // </editor-fold> end Accessing the object and the VLA values }}}2
+  //----------------------------------------------------------------------------
+
+  //----------------------------------------------------------------------------
+  // <editor-fold desc="Iterators"> {{{2
+
+  KOKKOS_INLINE_FUNCTION
+  iterator begin() noexcept { return _vla_pointer(); }
+
+  KOKKOS_INLINE_FUNCTION
+  const_iterator begin() const noexcept { return _vla_pointer(); }
+
+  KOKKOS_INLINE_FUNCTION
+  const_iterator cbegin() const noexcept { return _vla_pointer(); }
+
+  KOKKOS_INLINE_FUNCTION
+  iterator end() noexcept { return _vla_pointer() + m_num_entries; }
+
+  KOKKOS_INLINE_FUNCTION
+  const_iterator end() const noexcept { return _vla_pointer() + m_num_entries; }
+
+  KOKKOS_INLINE_FUNCTION
+  const_iterator cend() const noexcept { return _vla_pointer() + m_num_entries; }
+
+  // </editor-fold> end Iterators }}}2
+  //----------------------------------------------------------------------------
+};
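+
+// Usage sketch (illustrative; 'MyNode' and 'my_allocate' are hypothetical):
+//   struct MyNode : ObjectWithVLAEmulation<MyNode, double> {
+//     KOKKOS_INLINE_FUNCTION explicit MyNode(int32_t n)
+//         : ObjectWithVLAEmulation<MyNode, double>(n) {}
+//   };
+//   void*   mem  = my_allocate(MyNode::required_allocation_size(n));
+//   MyNode* node = new (mem) MyNode(n);  // base ctor placement-news n doubles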
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #if defined( KOKKOS_ENABLE_TASKDAG ) */
+#endif /* #ifndef KOKKOS_IMPL_VLAEMULATION_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewArray.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewArray.hpp
new file mode 100644 (file)
index 0000000..12a5fa2
--- /dev/null
@@ -0,0 +1,636 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_EXPERIMENTAL_VIEW_ARRAY_MAPPING_HPP
+#define KOKKOS_EXPERIMENTAL_VIEW_ARRAY_MAPPING_HPP
+
+#include <Kokkos_Array.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class DataType, class ArrayLayout, class V, size_t N, class P>
+struct ViewDataAnalysis<DataType, ArrayLayout, Kokkos::Array<V, N, P>> {
+ private:
+  using array_analysis = ViewArrayAnalysis<DataType>;
+
+  static_assert(std::is_void<P>::value, "");
+  static_assert(std::is_same<typename array_analysis::non_const_value_type,
+                             Kokkos::Array<V, N, P>>::value,
+                "");
+  static_assert(std::is_scalar<V>::value,
+                "View of Array type must be of a scalar type");
+
+ public:
+  using specialize = Kokkos::Array<>;
+
+  using dimension = typename array_analysis::dimension;
+
+ private:
+  enum {
+    is_const = std::is_same<typename array_analysis::value_type,
+                            typename array_analysis::const_value_type>::value
+  };
+
+  using array_scalar_dimension = typename dimension::template append<N>::type;
+
+  using scalar_type           = std::conditional_t<is_const, const V, V>;
+  using non_const_scalar_type = V;
+  using const_scalar_type     = const V;
+
+ public:
+  using value_type           = typename array_analysis::value_type;
+  using const_value_type     = typename array_analysis::const_value_type;
+  using non_const_value_type = typename array_analysis::non_const_value_type;
+
+  using type       = typename ViewDataType<value_type, dimension>::type;
+  using const_type = typename ViewDataType<const_value_type, dimension>::type;
+  using non_const_type =
+      typename ViewDataType<non_const_value_type, dimension>::type;
+
+  using scalar_array_type =
+      typename ViewDataType<scalar_type, array_scalar_dimension>::type;
+  using const_scalar_array_type =
+      typename ViewDataType<const_scalar_type, array_scalar_dimension>::type;
+  using non_const_scalar_array_type =
+      typename ViewDataType<non_const_scalar_type,
+                            array_scalar_dimension>::type;
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+/** \brief  View mapping for Kokkos::Array specialized data type and standard
+ * layout */
+template <class Traits>
+class ViewMapping<Traits, Kokkos::Array<>> {
+ private:
+  template <class, class...>
+  friend class ViewMapping;
+  template <class, class...>
+  friend class Kokkos::View;
+
+  using offset_type = ViewOffset<typename Traits::dimension,
+                                 typename Traits::array_layout, void>;
+
+  using handle_type = typename Traits::value_type::pointer;
+
+  handle_type m_impl_handle;
+  offset_type m_impl_offset;
+  size_t m_stride = 0;
+
+  using scalar_type = typename Traits::value_type::value_type;
+
+  using contiguous_reference = Kokkos::Array<scalar_type, (~std::size_t(0)),
+                                             Kokkos::Array<>::contiguous>;
+  using strided_reference =
+      Kokkos::Array<scalar_type, (~std::size_t(0)), Kokkos::Array<>::strided>;
+
+  enum {
+    is_contiguous_reference =
+        (Traits::rank == 0) || (std::is_same<typename Traits::array_layout,
+                                             Kokkos::LayoutRight>::value)
+  };
+
+  enum { Array_N = Traits::value_type::size() };
+  enum { Array_S = is_contiguous_reference ? Array_N : 1 };
+
+  KOKKOS_INLINE_FUNCTION
+  ViewMapping(const handle_type &arg_handle, const offset_type &arg_offset)
+      : m_impl_handle(arg_handle),
+        m_impl_offset(arg_offset),
+        m_stride(is_contiguous_reference ? 0 : arg_offset.span()) {}
+
+ public:
+  //----------------------------------------
+  // Domain dimensions
+
+  enum { Rank = Traits::dimension::rank };
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr size_t extent(const iType &r) const {
+    return m_impl_offset.m_dim.extent(r);
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr typename Traits::array_layout layout()
+      const {
+    return m_impl_offset.layout();
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_0() const {
+    return m_impl_offset.dimension_0();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_1() const {
+    return m_impl_offset.dimension_1();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_2() const {
+    return m_impl_offset.dimension_2();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_3() const {
+    return m_impl_offset.dimension_3();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_4() const {
+    return m_impl_offset.dimension_4();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_5() const {
+    return m_impl_offset.dimension_5();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_6() const {
+    return m_impl_offset.dimension_6();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_7() const {
+    return m_impl_offset.dimension_7();
+  }
+
+  // Is a regular layout with uniform striding for each index.
+  using is_regular = typename offset_type::is_regular;
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_0() const {
+    return m_impl_offset.stride_0();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_1() const {
+    return m_impl_offset.stride_1();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_2() const {
+    return m_impl_offset.stride_2();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_3() const {
+    return m_impl_offset.stride_3();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_4() const {
+    return m_impl_offset.stride_4();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_5() const {
+    return m_impl_offset.stride_5();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_6() const {
+    return m_impl_offset.stride_6();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_7() const {
+    return m_impl_offset.stride_7();
+  }
+
+  //----------------------------------------
+  // Range span
+
+  /** \brief  Span of the mapped range */
+  KOKKOS_INLINE_FUNCTION constexpr size_t span() const {
+    return m_impl_offset.span() * Array_N;
+  }
+
+  /** \brief  Is the mapped range span contiguous */
+  KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
+    return m_impl_offset.span_is_contiguous();
+  }
+
+  using reference_type =
+      std::conditional_t<is_contiguous_reference, contiguous_reference,
+                         strided_reference>;
+
+  using pointer_type = handle_type;
+
+  /** \brief  If the data references are lvalue references then a pointer to
+   * the memory can be queried */
+  KOKKOS_INLINE_FUNCTION constexpr pointer_type data() const {
+    return m_impl_handle;
+  }
+
+  //----------------------------------------
+  // The View class performs all rank and bounds checking before
+  // calling these element reference methods.
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  reference_type reference() const {
+    return reference_type(m_impl_handle + 0, Array_N, 0);
+  }
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type reference(const I0 &i0) const {
+    return reference_type(m_impl_handle + m_impl_offset(i0) * Array_S, Array_N,
+                          m_stride);
+  }
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type reference(const I0 &i0,
+                                                       const I1 &i1) const {
+    return reference_type(m_impl_handle + m_impl_offset(i0, i1) * Array_S,
+                          Array_N, m_stride);
+  }
+
+  template <typename I0, typename I1, typename I2>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type reference(const I0 &i0,
+                                                       const I1 &i1,
+                                                       const I2 &i2) const {
+    return reference_type(m_impl_handle + m_impl_offset(i0, i1, i2) * Array_S,
+                          Array_N, m_stride);
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type
+  reference(const I0 &i0, const I1 &i1, const I2 &i2, const I3 &i3) const {
+    return reference_type(
+        m_impl_handle + m_impl_offset(i0, i1, i2, i3) * Array_S, Array_N,
+        m_stride);
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type reference(const I0 &i0,
+                                                       const I1 &i1,
+                                                       const I2 &i2,
+                                                       const I3 &i3,
+                                                       const I4 &i4) const {
+    return reference_type(
+        m_impl_handle + m_impl_offset(i0, i1, i2, i3, i4) * Array_S, Array_N,
+        m_stride);
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type
+  reference(const I0 &i0, const I1 &i1, const I2 &i2, const I3 &i3,
+            const I4 &i4, const I5 &i5) const {
+    return reference_type(
+        m_impl_handle + m_impl_offset(i0, i1, i2, i3, i4, i5) * Array_S,
+        Array_N, m_stride);
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type
+  reference(const I0 &i0, const I1 &i1, const I2 &i2, const I3 &i3,
+            const I4 &i4, const I5 &i5, const I6 &i6) const {
+    return reference_type(
+        m_impl_handle + m_impl_offset(i0, i1, i2, i3, i4, i5, i6) * Array_S,
+        Array_N, m_stride);
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename I7>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type
+  reference(const I0 &i0, const I1 &i1, const I2 &i2, const I3 &i3,
+            const I4 &i4, const I5 &i5, const I6 &i6, const I7 &i7) const {
+    return reference_type(
+        m_impl_handle + m_impl_offset(i0, i1, i2, i3, i4, i5, i6, i7) * Array_S,
+        Array_N, m_stride);
+  }
+
+  //----------------------------------------
+
+ private:
+  enum { MemorySpanMask = 8 - 1 /* Force alignment on 8 byte boundary */ };
+  enum { MemorySpanSize = sizeof(scalar_type) };
+
+ public:
+  /** \brief  Span, in bytes, of the referenced memory */
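+  // Rounded up to the 8-byte boundary, e.g. a 13-byte span yields
+  // (13 + 7) & ~7 == 16.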
+  KOKKOS_INLINE_FUNCTION constexpr size_t memory_span() const {
+    return (m_impl_offset.span() * Array_N * MemorySpanSize + MemorySpanMask) &
+           ~size_t(MemorySpanMask);
+  }
+
+  //----------------------------------------
+
+  KOKKOS_DEFAULTED_FUNCTION ViewMapping() = default;
+
+  //----------------------------------------
+
+  template <class... Args>
+  KOKKOS_INLINE_FUNCTION ViewMapping(pointer_type ptr, Args... args)
+      : m_impl_handle(ptr),
+        m_impl_offset(std::integral_constant<unsigned, 0>(), args...),
+        m_stride(m_impl_offset.span()) {}
+
+  //----------------------------------------
+
+  template <class... P>
+  Kokkos::Impl::SharedAllocationRecord<> *allocate_shared(
+      Kokkos::Impl::ViewCtorProp<P...> const &arg_prop,
+      typename Traits::array_layout const &arg_layout,
+      bool execution_space_specified) {
+    using alloc_prop = Kokkos::Impl::ViewCtorProp<P...>;
+
+    using execution_space = typename alloc_prop::execution_space;
+    using memory_space    = typename Traits::memory_space;
+    using functor_type =
+        ViewValueFunctor<typename Traits::device_type, scalar_type>;
+    using record_type =
+        Kokkos::Impl::SharedAllocationRecord<memory_space, functor_type>;
+
+    // Query the mapping for byte-size of allocation.
+    using padding = std::integral_constant<
+        unsigned int, alloc_prop::allow_padding ? sizeof(scalar_type) : 0>;
+
+    m_impl_offset = offset_type(padding(), arg_layout);
+
+    const size_t alloc_size =
+        (m_impl_offset.span() * Array_N * MemorySpanSize + MemorySpanMask) &
+        ~size_t(MemorySpanMask);
+    const auto &alloc_name =
+        static_cast<Kokkos::Impl::ViewCtorProp<void, std::string> const &>(
+            arg_prop)
+            .value;
+    const execution_space &exec_space =
+        static_cast<Kokkos::Impl::ViewCtorProp<void, execution_space> const &>(
+            arg_prop)
+            .value;
+    const memory_space &mem_space =
+        static_cast<Kokkos::Impl::ViewCtorProp<void, memory_space> const &>(
+            arg_prop)
+            .value;
+
+    // Allocate memory from the memory space and create tracking record.
+    record_type *const record =
+        execution_space_specified
+            ? record_type::allocate(exec_space, mem_space, alloc_name,
+                                    alloc_size)
+            : record_type::allocate(mem_space, alloc_name, alloc_size);
+
+    if (alloc_size) {
+      m_impl_handle =
+          handle_type(reinterpret_cast<pointer_type>(record->data()));
+
+      if (alloc_prop::initialize) {
+        // The functor constructs and destroys
+        record->m_destroy =
+            execution_space_specified
+                ? functor_type(exec_space, (pointer_type)m_impl_handle,
+                               m_impl_offset.span() * Array_N, alloc_name)
+                : functor_type((pointer_type)m_impl_handle,
+                               m_impl_offset.span() * Array_N, alloc_name);
+
+        record->m_destroy.construct_shared_allocation();
+      }
+    }
+
+    return record;
+  }
+};
+
+/** \brief Assign Array to non-Array */
+
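+// Illustration: a rank-1 View of Kokkos::Array<double, 3> is assignable to its
+// scalar array_type View<double*[3]>; the Array extent becomes the trailing
+// dimension.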
+template <class DstTraits, class SrcTraits>
+class ViewMapping<
+    DstTraits, SrcTraits,
+    std::enable_if_t<(
+        std::is_same<typename DstTraits::memory_space,
+                     typename SrcTraits::memory_space>::value &&
+        std::is_void<typename DstTraits::specialize>::value &&
+        (std::is_same<typename DstTraits::array_layout,
+                      Kokkos::LayoutLeft>::value ||
+         std::is_same<typename DstTraits::array_layout,
+                      Kokkos::LayoutRight>::value ||
+         std::is_same<typename DstTraits::array_layout,
+                      Kokkos::LayoutStride>::value) &&
+        std::is_same<typename SrcTraits::specialize, Kokkos::Array<>>::value &&
+        (std::is_same<typename SrcTraits::array_layout,
+                      Kokkos::LayoutLeft>::value ||
+         std::is_same<typename SrcTraits::array_layout,
+                      Kokkos::LayoutRight>::value ||
+         std::is_same<typename SrcTraits::array_layout,
+                      Kokkos::LayoutStride>::value))>> {
+ public:
+  // Can only convert to View::array_type
+
+  enum {
+    is_assignable_data_type =
+        std::is_same<typename DstTraits::data_type,
+                     typename SrcTraits::scalar_array_type>::value &&
+        (DstTraits::rank == SrcTraits::rank + 1)
+  };
+  enum {
+    is_assignable =
+        std::is_same<typename DstTraits::data_type,
+                     typename SrcTraits::scalar_array_type>::value &&
+        std::is_same<typename DstTraits::array_layout,
+                     typename SrcTraits::array_layout>::value
+  };
+
+  using TrackType = Kokkos::Impl::SharedAllocationTracker;
+  using DstType   = ViewMapping<DstTraits, void>;
+  using SrcType   = ViewMapping<SrcTraits, Kokkos::Array<>>;
+
+  KOKKOS_INLINE_FUNCTION
+  static void assign(DstType &dst, const SrcType &src,
+                     const TrackType & /*src_track*/) {
+    static_assert(is_assignable, "Can only convert to array_type");
+
+    using dst_offset_type = typename DstType::offset_type;
+
+    // Array dimension becomes the last dimension.
+    // Arguments beyond the destination rank are ignored.
+    if (src.span_is_contiguous()) {  // not padded
+      dst.m_impl_offset = dst_offset_type(
+          std::integral_constant<unsigned, 0>(),
+          typename DstTraits::array_layout(
+              (0 < SrcType::Rank ? src.dimension_0()
+                                 : SrcTraits::value_type::size()),
+              (1 < SrcType::Rank ? src.dimension_1()
+                                 : SrcTraits::value_type::size()),
+              (2 < SrcType::Rank ? src.dimension_2()
+                                 : SrcTraits::value_type::size()),
+              (3 < SrcType::Rank ? src.dimension_3()
+                                 : SrcTraits::value_type::size()),
+              (4 < SrcType::Rank ? src.dimension_4()
+                                 : SrcTraits::value_type::size()),
+              (5 < SrcType::Rank ? src.dimension_5()
+                                 : SrcTraits::value_type::size()),
+              (6 < SrcType::Rank ? src.dimension_6()
+                                 : SrcTraits::value_type::size()),
+              (7 < SrcType::Rank ? src.dimension_7()
+                                 : SrcTraits::value_type::size())));
+    } else {  // is padded
+      using padded = std::integral_constant<
+          unsigned int, sizeof(typename SrcTraits::value_type::value_type)>;
+
+      dst.m_impl_offset = dst_offset_type(
+          padded(), typename DstTraits::array_layout(
+                        (0 < SrcType::Rank ? src.dimension_0()
+                                           : SrcTraits::value_type::size()),
+                        (1 < SrcType::Rank ? src.dimension_1()
+                                           : SrcTraits::value_type::size()),
+                        (2 < SrcType::Rank ? src.dimension_2()
+                                           : SrcTraits::value_type::size()),
+                        (3 < SrcType::Rank ? src.dimension_3()
+                                           : SrcTraits::value_type::size()),
+                        (4 < SrcType::Rank ? src.dimension_4()
+                                           : SrcTraits::value_type::size()),
+                        (5 < SrcType::Rank ? src.dimension_5()
+                                           : SrcTraits::value_type::size()),
+                        (6 < SrcType::Rank ? src.dimension_6()
+                                           : SrcTraits::value_type::size()),
+                        (7 < SrcType::Rank ? src.dimension_7()
+                                           : SrcTraits::value_type::size())));
+    }
+
+    dst.m_impl_handle = src.m_impl_handle;
+  }
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+template <class SrcTraits, class... Args>
+class ViewMapping<
+    std::enable_if_t<(
+        std::is_same<typename SrcTraits::specialize, Kokkos::Array<>>::value &&
+        (std::is_same<typename SrcTraits::array_layout,
+                      Kokkos::LayoutLeft>::value ||
+         std::is_same<typename SrcTraits::array_layout,
+                      Kokkos::LayoutRight>::value ||
+         std::is_same<typename SrcTraits::array_layout,
+                      Kokkos::LayoutStride>::value))>,
+    SrcTraits, Args...> {
+ private:
+  static_assert(SrcTraits::rank == sizeof...(Args), "");
+
+  enum : bool {
+    R0 = is_integral_extent<0, Args...>::value,
+    R1 = is_integral_extent<1, Args...>::value,
+    R2 = is_integral_extent<2, Args...>::value,
+    R3 = is_integral_extent<3, Args...>::value,
+    R4 = is_integral_extent<4, Args...>::value,
+    R5 = is_integral_extent<5, Args...>::value,
+    R6 = is_integral_extent<6, Args...>::value,
+    R7 = is_integral_extent<7, Args...>::value
+  };
+
+  enum {
+    rank = unsigned(R0) + unsigned(R1) + unsigned(R2) + unsigned(R3) +
+           unsigned(R4) + unsigned(R5) + unsigned(R6) + unsigned(R7)
+  };
+
+  // Whether the right-most rank is a range.
+  enum {
+    R0_rev =
+        0 == SrcTraits::rank
+            ? false
+            : (1 == SrcTraits::rank
+                   ? R0
+                   : (2 == SrcTraits::rank
+                          ? R1
+                          : (3 == SrcTraits::rank
+                                 ? R2
+                                 : (4 == SrcTraits::rank
+                                        ? R3
+                                        : (5 == SrcTraits::rank
+                                               ? R4
+                                               : (6 == SrcTraits::rank
+                                                      ? R5
+                                                      : (7 == SrcTraits::rank
+                                                             ? R6
+                                                             : R7)))))))
+  };
+
+  // Subview's layout
+  using array_layout =
+      std::conditional_t<((rank == 0) ||
+                          (rank <= 2 && R0 &&
+                           std::is_same<typename SrcTraits::array_layout,
+                                        Kokkos::LayoutLeft>::value) ||
+                          (rank <= 2 && R0_rev &&
+                           std::is_same<typename SrcTraits::array_layout,
+                                        Kokkos::LayoutRight>::value)),
+                         typename SrcTraits::array_layout,
+                         Kokkos::LayoutStride>;
+
+  using value_type = typename SrcTraits::value_type;
+
+  using data_type = std::conditional_t<
+      rank == 0, value_type,
+      std::conditional_t<
+          rank == 1, value_type *,
+          std::conditional_t<
+              rank == 2, value_type **,
+              std::conditional_t<
+                  rank == 3, value_type ***,
+                  std::conditional_t<
+                      rank == 4, value_type ****,
+                      std::conditional_t<
+                          rank == 5, value_type *****,
+                          std::conditional_t<
+                              rank == 6, value_type ******,
+                              std::conditional_t<rank == 7, value_type *******,
+                                                 value_type ********>>>>>>>>;
+
+ public:
+  using traits_type = Kokkos::ViewTraits<data_type, array_layout,
+                                         typename SrcTraits::device_type,
+                                         typename SrcTraits::memory_traits>;
+
+  using type =
+      Kokkos::View<data_type, array_layout, typename SrcTraits::device_type,
+                   typename SrcTraits::memory_traits>;
+
+  KOKKOS_INLINE_FUNCTION
+  static void assign(ViewMapping<traits_type, void> &dst,
+                     ViewMapping<SrcTraits, void> const &src, Args... args) {
+    using DstType = ViewMapping<traits_type, void>;
+
+    using dst_offset_type = typename DstType::offset_type;
+    using dst_handle_type = typename DstType::handle_type;
+
+    const SubviewExtents<SrcTraits::rank, rank> extents(src.m_impl_offset.m_dim,
+                                                        args...);
+
+    dst.m_impl_offset = dst_offset_type(src.m_impl_offset, extents);
+    dst.m_impl_handle = dst_handle_type(
+        src.m_impl_handle +
+        src.m_impl_offset(extents.domain_offset(0), extents.domain_offset(1),
+                          extents.domain_offset(2), extents.domain_offset(3),
+                          extents.domain_offset(4), extents.domain_offset(5),
+                          extents.domain_offset(6), extents.domain_offset(7)));
+  }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_EXPERIMENTAL_VIEW_ARRAY_MAPPING_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewCtor.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewCtor.hpp
new file mode 100644 (file)
index 0000000..8bc8f86
--- /dev/null
@@ -0,0 +1,313 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_EXPERIMENTAL_IMPL_VIEW_CTOR_PROP_HPP
+#define KOKKOS_EXPERIMENTAL_IMPL_VIEW_CTOR_PROP_HPP
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+struct WithoutInitializing_t {};
+struct AllowPadding_t {};
+
+template <typename>
+struct is_view_ctor_property : public std::false_type {};
+
+template <>
+struct is_view_ctor_property<WithoutInitializing_t> : public std::true_type {};
+
+template <>
+struct is_view_ctor_property<AllowPadding_t> : public std::true_type {};
+
+//----------------------------------------------------------------------------
+/**\brief Whether a type can be used for a view label */
+
+template <typename>
+struct is_view_label : public std::false_type {};
+
+template <>
+struct is_view_label<std::string> : public std::true_type {};
+
+template <unsigned N>
+struct is_view_label<char[N]> : public std::true_type {};
+
+template <unsigned N>
+struct is_view_label<const char[N]> : public std::true_type {};
+
+//----------------------------------------------------------------------------
+
+template <typename... P>
+struct ViewCtorProp;
+
+// Forward declare
+template <typename Specialize, typename T>
+struct CommonViewAllocProp;
+
+/* Dummy to allow for empty ViewCtorProp object
+ */
+template <>
+struct ViewCtorProp<void> {};
+
+/* Common value_type stored as ViewCtorProp
+ */
+template <typename Specialize, typename T>
+struct ViewCtorProp<void, CommonViewAllocProp<Specialize, T>> {
+  ViewCtorProp()                     = default;
+  ViewCtorProp(const ViewCtorProp &) = default;
+  ViewCtorProp &operator=(const ViewCtorProp &) = default;
+
+  using type = CommonViewAllocProp<Specialize, T>;
+
+  KOKKOS_INLINE_FUNCTION
+  ViewCtorProp(const type &arg) : value(arg) {}
+  KOKKOS_INLINE_FUNCTION
+  ViewCtorProp(type &&arg) : value(arg) {}
+
+  type value;
+};
+
+/*  std::integral_constant<unsigned, I> types are dummy arguments
+ *  that avoid duplicate base class errors
+ */
+template <unsigned I>
+struct ViewCtorProp<void, std::integral_constant<unsigned, I>> {
+  ViewCtorProp()                     = default;
+  ViewCtorProp(const ViewCtorProp &) = default;
+  ViewCtorProp &operator=(const ViewCtorProp &) = default;
+
+  template <typename P>
+  KOKKOS_INLINE_FUNCTION ViewCtorProp(const P &) {}
+};
+
+/* Property flags have constexpr value */
+template <typename P>
+struct ViewCtorProp<
+    std::enable_if_t<std::is_same<P, AllowPadding_t>::value ||
+                     std::is_same<P, WithoutInitializing_t>::value>,
+    P> {
+  ViewCtorProp()                     = default;
+  ViewCtorProp(const ViewCtorProp &) = default;
+  ViewCtorProp &operator=(const ViewCtorProp &) = default;
+
+  using type = P;
+
+  ViewCtorProp(const type &) {}
+
+  type value = type();
+};
+
+/* Map input label type to std::string */
+template <typename Label>
+struct ViewCtorProp<std::enable_if_t<is_view_label<Label>::value>, Label> {
+  ViewCtorProp()                     = default;
+  ViewCtorProp(const ViewCtorProp &) = default;
+  ViewCtorProp &operator=(const ViewCtorProp &) = default;
+
+  using type = std::string;
+
+  ViewCtorProp(const type &arg) : value(arg) {}
+  ViewCtorProp(type &&arg) : value(arg) {}
+
+  type value;
+};
+
+template <typename Space>
+struct ViewCtorProp<std::enable_if_t<Kokkos::is_memory_space<Space>::value ||
+                                     Kokkos::is_execution_space<Space>::value>,
+                    Space> {
+  ViewCtorProp()                     = default;
+  ViewCtorProp(const ViewCtorProp &) = default;
+  ViewCtorProp &operator=(const ViewCtorProp &) = default;
+
+  using type = Space;
+
+  ViewCtorProp(const type &arg) : value(arg) {}
+
+  type value;
+};
+
+template <typename T>
+struct ViewCtorProp<void, T *> {
+  ViewCtorProp()                     = default;
+  ViewCtorProp(const ViewCtorProp &) = default;
+  ViewCtorProp &operator=(const ViewCtorProp &) = default;
+
+  using type = T *;
+
+  KOKKOS_INLINE_FUNCTION
+  ViewCtorProp(const type arg) : value(arg) {}
+
+  type value;
+};
+
+// For some reason I don't understand, I needed this specialization explicitly
+// for NVCC/MSVC
+template <typename T>
+struct ViewCtorProp<T *> {
+  ViewCtorProp()                     = default;
+  ViewCtorProp(const ViewCtorProp &) = default;
+  ViewCtorProp &operator=(const ViewCtorProp &) = default;
+
+  using type = T *;
+
+  KOKKOS_INLINE_FUNCTION
+  ViewCtorProp(const type arg) : value(arg) {}
+
+  enum : bool { has_pointer = true };
+  using pointer_type = type;
+  type value;
+};
+
+// If we use `ViewCtorProp<Args...>` and `ViewCtorProp<void, Args>...` directly
+// in the parameter lists and base class initializers, respectively, as far as
+// we can tell MSVC 16.5.5+CUDA 10.2 thinks that `ViewCtorProp` refers to the
+// current instantiation, not the template itself, and gets all kinds of
+// confused. To work around this, we just use a couple of alias templates that
+// amount to the same thing.
+template <typename... Args>
+using view_ctor_prop_args = ViewCtorProp<Args...>;
+
+template <typename Arg>
+using view_ctor_prop_base = ViewCtorProp<void, Arg>;
+
+template <typename... P>
+struct ViewCtorProp : public ViewCtorProp<void, P>... {
+ private:
+  using var_memory_space =
+      Kokkos::Impl::has_condition<void, Kokkos::is_memory_space, P...>;
+
+  using var_execution_space =
+      Kokkos::Impl::has_condition<void, Kokkos::is_execution_space, P...>;
+
+  struct VOIDDUMMY {};
+
+  using var_pointer =
+      Kokkos::Impl::has_condition<VOIDDUMMY, std::is_pointer, P...>;
+
+ public:
+  /* Flags for the common properties */
+  enum { has_memory_space = var_memory_space::value };
+  enum { has_execution_space = var_execution_space::value };
+  enum { has_pointer = var_pointer::value };
+  enum { has_label = Kokkos::Impl::has_type<std::string, P...>::value };
+  enum { allow_padding = Kokkos::Impl::has_type<AllowPadding_t, P...>::value };
+  enum {
+    initialize = !Kokkos::Impl::has_type<WithoutInitializing_t, P...>::value
+  };
+
+  using memory_space    = typename var_memory_space::type;
+  using execution_space = typename var_execution_space::type;
+  using pointer_type    = typename var_pointer::type;
+
+  /*  Copy from a matching argument list.
+   *  Requires  std::is_same< P , ViewCtorProp< void , Args > >::value ...
+   */
+  template <typename... Args>
+  inline ViewCtorProp(Args const &... args) : ViewCtorProp<void, P>(args)... {}
+
+  template <typename... Args>
+  KOKKOS_INLINE_FUNCTION ViewCtorProp(pointer_type arg0, Args const &... args)
+      : ViewCtorProp<void, pointer_type>(arg0),
+        ViewCtorProp<void, typename ViewCtorProp<void, Args>::type>(args)... {}
+
+  /* Copy from a matching property subset */
+  KOKKOS_INLINE_FUNCTION ViewCtorProp(pointer_type arg0)
+      : ViewCtorProp<void, pointer_type>(arg0) {}
+
+  // If we use `ViewCtorProp<Args...>` and `ViewCtorProp<void, Args>...` here
+  // directly, MSVC 16.5.5+CUDA 10.2 appears to think that `ViewCtorProp` refers
+  // to the current instantiation, not the template itself, and gets all kinds
+  // of confused. To work around this, we just use a couple of alias templates
+  // that amount to the same thing.
+  template <typename... Args>
+  ViewCtorProp(view_ctor_prop_args<Args...> const &arg)
+      : view_ctor_prop_base<Args>(
+            static_cast<view_ctor_prop_base<Args> const &>(arg))... {
+    // Suppress an unused argument warning that (at least at one point) would
+    // show up if sizeof...(Args) == 0
+    (void)arg;
+  }
+};
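+
+// Illustration: a property pack carrying a label and the "don't initialize"
+// flag; the flags are visible as compile-time constants:
+//   using props = ViewCtorProp<std::string, WithoutInitializing_t>;
+//   static_assert(props::has_label && !props::initialize, "");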
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+namespace Impl {
+struct ViewAllocateWithoutInitializingBackwardCompat {};
+
+template <>
+struct ViewCtorProp<void, ViewAllocateWithoutInitializingBackwardCompat> {};
+
+// NOTE This specialization is meant to be used as the
+// ViewAllocateWithoutInitializing alias below. All it does is add a
+// constructor that takes the label as single argument.
+template <>
+struct ViewCtorProp<WithoutInitializing_t, std::string,
+                    ViewAllocateWithoutInitializingBackwardCompat>
+    : ViewCtorProp<WithoutInitializing_t, std::string>,
+      ViewCtorProp<void, ViewAllocateWithoutInitializingBackwardCompat> {
+  ViewCtorProp(std::string label)
+      : ViewCtorProp<WithoutInitializing_t, std::string>(
+            WithoutInitializing_t(), std::move(label)) {}
+};
+} /* namespace Impl */
+
+using ViewAllocateWithoutInitializing =
+    Impl::ViewCtorProp<Impl::WithoutInitializing_t, std::string,
+                       Impl::ViewAllocateWithoutInitializingBackwardCompat>;
+
+} /* namespace Kokkos */
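+
+// A minimal usage sketch (illustrative only): the backward-compatible alias
+// above allows the older spelling
+//
+//   Kokkos::View<double*> v(Kokkos::ViewAllocateWithoutInitializing("v"), n);
+//
+// which is equivalent to passing
+// Kokkos::view_alloc(Kokkos::WithoutInitializing, "v") as the first argument.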
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewLayoutTiled.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewLayoutTiled.hpp
new file mode 100644 (file)
index 0000000..8d367ce
--- /dev/null
@@ -0,0 +1,1453 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_EXPERIMENTAL_VIEWLAYOUTTILE_HPP
+#define KOKKOS_EXPERIMENTAL_VIEWLAYOUTTILE_HPP
+
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_View.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+
+// View offset and mapping for tiled views
+
+template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
+          unsigned ArgN1>
+struct is_array_layout<Kokkos::Experimental::LayoutTiled<
+    OuterP, InnerP, ArgN0, ArgN1, 0, 0, 0, 0, 0, 0, true>>
+    : public std::true_type {};
+
+template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
+          unsigned ArgN1, unsigned ArgN2>
+struct is_array_layout<Kokkos::Experimental::LayoutTiled<
+    OuterP, InnerP, ArgN0, ArgN1, ArgN2, 0, 0, 0, 0, 0, true>>
+    : public std::true_type {};
+
+template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
+          unsigned ArgN1, unsigned ArgN2, unsigned ArgN3>
+struct is_array_layout<Kokkos::Experimental::LayoutTiled<
+    OuterP, InnerP, ArgN0, ArgN1, ArgN2, ArgN3, 0, 0, 0, 0, true>>
+    : public std::true_type {};
+
+template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
+          unsigned ArgN1, unsigned ArgN2, unsigned ArgN3, unsigned ArgN4>
+struct is_array_layout<Kokkos::Experimental::LayoutTiled<
+    OuterP, InnerP, ArgN0, ArgN1, ArgN2, ArgN3, ArgN4, 0, 0, 0, true>>
+    : public std::true_type {};
+
+template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
+          unsigned ArgN1, unsigned ArgN2, unsigned ArgN3, unsigned ArgN4,
+          unsigned ArgN5>
+struct is_array_layout<Kokkos::Experimental::LayoutTiled<
+    OuterP, InnerP, ArgN0, ArgN1, ArgN2, ArgN3, ArgN4, ArgN5, 0, 0, true>>
+    : public std::true_type {};
+
+template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
+          unsigned ArgN1, unsigned ArgN2, unsigned ArgN3, unsigned ArgN4,
+          unsigned ArgN5, unsigned ArgN6>
+struct is_array_layout<Kokkos::Experimental::LayoutTiled<
+    OuterP, InnerP, ArgN0, ArgN1, ArgN2, ArgN3, ArgN4, ArgN5, ArgN6, 0, true>>
+    : public std::true_type {};
+
+template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
+          unsigned ArgN1, unsigned ArgN2, unsigned ArgN3, unsigned ArgN4,
+          unsigned ArgN5, unsigned ArgN6, unsigned ArgN7>
+struct is_array_layout<
+    Kokkos::Experimental::LayoutTiled<OuterP, InnerP, ArgN0, ArgN1, ArgN2,
+                                      ArgN3, ArgN4, ArgN5, ArgN6, ArgN7, true>>
+    : public std::true_type {};
+
+template <class L>
+struct is_array_layout_tiled : public std::false_type {};
+
+template <Kokkos::Iterate OuterP, Kokkos::Iterate InnerP, unsigned ArgN0,
+          unsigned ArgN1, unsigned ArgN2, unsigned ArgN3, unsigned ArgN4,
+          unsigned ArgN5, unsigned ArgN6, unsigned ArgN7, bool IsPowerTwo>
+struct is_array_layout_tiled<Kokkos::Experimental::LayoutTiled<
+    OuterP, InnerP, ArgN0, ArgN1, ArgN2, ArgN3, ArgN4, ArgN5, ArgN6, ArgN7,
+    IsPowerTwo>> : public std::true_type {
+};  // The last template parameter being "true" means this currently only
+    // supports power-of-two tile dimensions
+
+namespace Impl {
+
+template <class Dimension, class Layout>
+struct ViewOffset<
+    Dimension, Layout,
+    std::enable_if_t<((Dimension::rank <= 8) && (Dimension::rank >= 2) &&
+                      is_array_layout<Layout>::value &&
+                      is_array_layout_tiled<Layout>::value)>> {
+ public:
+  static constexpr Kokkos::Iterate outer_pattern = Layout::outer_pattern;
+  static constexpr Kokkos::Iterate inner_pattern = Layout::inner_pattern;
+
+  static constexpr int VORank = Dimension::rank;
+
+  static constexpr unsigned SHIFT_0 =
+      Kokkos::Impl::integral_power_of_two(Layout::N0);
+  static constexpr unsigned SHIFT_1 =
+      Kokkos::Impl::integral_power_of_two(Layout::N1);
+  static constexpr unsigned SHIFT_2 =
+      Kokkos::Impl::integral_power_of_two(Layout::N2);
+  static constexpr unsigned SHIFT_3 =
+      Kokkos::Impl::integral_power_of_two(Layout::N3);
+  static constexpr unsigned SHIFT_4 =
+      Kokkos::Impl::integral_power_of_two(Layout::N4);
+  static constexpr unsigned SHIFT_5 =
+      Kokkos::Impl::integral_power_of_two(Layout::N5);
+  static constexpr unsigned SHIFT_6 =
+      Kokkos::Impl::integral_power_of_two(Layout::N6);
+  static constexpr unsigned SHIFT_7 =
+      Kokkos::Impl::integral_power_of_two(Layout::N7);
+  static constexpr int MASK_0 = Layout::N0 - 1;
+  static constexpr int MASK_1 = Layout::N1 - 1;
+  static constexpr int MASK_2 = Layout::N2 - 1;
+  static constexpr int MASK_3 = Layout::N3 - 1;
+  static constexpr int MASK_4 = Layout::N4 - 1;
+  static constexpr int MASK_5 = Layout::N5 - 1;
+  static constexpr int MASK_6 = Layout::N6 - 1;
+  static constexpr int MASK_7 = Layout::N7 - 1;
+
+  static constexpr unsigned SHIFT_2T = SHIFT_0 + SHIFT_1;
+  static constexpr unsigned SHIFT_3T = SHIFT_0 + SHIFT_1 + SHIFT_2;
+  static constexpr unsigned SHIFT_4T = SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3;
+  static constexpr unsigned SHIFT_5T =
+      SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4;
+  static constexpr unsigned SHIFT_6T =
+      SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4 + SHIFT_5;
+  static constexpr unsigned SHIFT_7T =
+      SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4 + SHIFT_5 + SHIFT_6;
+  static constexpr unsigned SHIFT_8T = SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 +
+                                       SHIFT_4 + SHIFT_5 + SHIFT_6 + SHIFT_7;
+
+  // Is an irregular layout that does not have uniform striding for each index.
+  using is_mapping_plugin = std::true_type;
+  using is_regular        = std::false_type;
+
+  using size_type      = size_t;
+  using dimension_type = Dimension;
+  using array_layout   = Layout;
+
+  dimension_type m_dim;
+  size_type m_tile_N0;  // Num tiles dim 0
+  size_type m_tile_N1;
+  size_type m_tile_N2;
+  size_type m_tile_N3;
+  size_type m_tile_N4;
+  size_type m_tile_N5;
+  size_type m_tile_N6;
+  size_type m_tile_N7;
+
+  //----------------------------------------
+
+#define DEBUG_OUTPUT_CHECK 0
+
+  // Rank 2
+  template <typename I0, typename I1>
+  KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0,
+                                              I1 const& i1) const {
+    auto tile_offset =
+        (outer_pattern == (Kokkos::Iterate::Left))
+            ? (((i0 >> SHIFT_0) + m_tile_N0 * ((i1 >> SHIFT_1))) << SHIFT_2T)
+            : (((m_tile_N1 * (i0 >> SHIFT_0) + (i1 >> SHIFT_1))) << SHIFT_2T);
+    //                     ( num_tiles[1] * ti0     +  ti1 ) * FTD
+
+    auto local_offset = (inner_pattern == (Kokkos::Iterate::Left))
+                            ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0))
+                            : (((i0 & MASK_0) << SHIFT_1) + (i1 & MASK_1));
+    //                     ( tile_dim[1] * li0         +  li1 )
+
+#if DEBUG_OUTPUT_CHECK
+    std::cout << "Am I Outer Left? "
+              << (outer_pattern == (Kokkos::Iterate::Left)) << std::endl;
+    std::cout << "Am I Inner Left? "
+              << (inner_pattern == (Kokkos::Iterate::Left)) << std::endl;
+    std::cout << "i0 = " << i0 << " i1 = " << i1
+              << "\ntilei0 = " << (i0 >> SHIFT_0)
+              << " tilei1 = " << (i1 >> SHIFT_1)
+              << "locali0 = " << (i0 & MASK_0)
+              << "\nlocali1 = " << (i1 & MASK_1) << std::endl;
+#endif
+
+    return tile_offset + local_offset;
+  }
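+
+  // Worked example (illustrative, with assumed tile sizes): for 4x4 tiles
+  // (SHIFT_0 = SHIFT_1 = 2, MASK_0 = MASK_1 = 3, SHIFT_2T = 4) on a view with
+  // m_tile_N0 = 2 and both patterns Iterate::Left, the index (i0, i1) = (5, 6)
+  // splits into tile (1, 1) and local (1, 2), so
+  //   tile_offset  = (1 + 2 * 1) << 4 = 48
+  //   local_offset = 1 + (2 << 2)     = 9
+  // and operator() returns 57.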
+
+  // Rank 3
+  template <typename I0, typename I1, typename I2>
+  KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0, I1 const& i1,
+                                              I2 const& i2) const {
+    auto tile_offset =
+        (outer_pattern == Kokkos::Iterate::Left)
+            ? (((i0 >> SHIFT_0) +
+                m_tile_N0 * ((i1 >> SHIFT_1) + m_tile_N1 * (i2 >> SHIFT_2)))
+               << SHIFT_3T)
+            : ((m_tile_N2 * (m_tile_N1 * (i0 >> SHIFT_0) + (i1 >> SHIFT_1)) +
+                (i2 >> SHIFT_2))
+               << SHIFT_3T);
+
+    auto local_offset = (inner_pattern == Kokkos::Iterate::Left)
+                            ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0) +
+                               ((i2 & MASK_2) << (SHIFT_0 + SHIFT_1)))
+                            : (((i0 & MASK_0) << (SHIFT_2 + SHIFT_1)) +
+                               ((i1 & MASK_1) << (SHIFT_2)) + (i2 & MASK_2));
+
+#if DEBUG_OUTPUT_CHECK
+    std::cout << "Am I Outer Left? "
+              << (outer_pattern == (Kokkos::Iterate::Left)) << std::endl;
+    std::cout << "Am I Inner Left? "
+              << (inner_pattern == (Kokkos::Iterate::Left)) << std::endl;
+    std::cout << "i0 = " << i0 << " i1 = " << i1 << " i2 = " << i2
+              << "\ntilei0 = " << (i0 >> SHIFT_0)
+              << " tilei1 = " << (i1 >> SHIFT_1)
+              << " tilei2 = " << (i2 >> SHIFT_2)
+              << "\nlocali0 = " << (i0 & MASK_0)
+              << "locali1 = " << (i1 & MASK_1) << "locali2 = " << (i2 & MASK_2)
+              << std::endl;
+#endif
+
+    return tile_offset + local_offset;
+  }
+
+  // Rank 4
+  template <typename I0, typename I1, typename I2, typename I3>
+  KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0, I1 const& i1,
+                                              I2 const& i2,
+                                              I3 const& i3) const {
+    auto tile_offset =
+        (outer_pattern == Kokkos::Iterate::Left)
+            ? (((i0 >> SHIFT_0) +
+                m_tile_N0 * ((i1 >> SHIFT_1) +
+                             m_tile_N1 * ((i2 >> SHIFT_2) +
+                                          m_tile_N2 * (i3 >> SHIFT_3))))
+               << SHIFT_4T)
+            : ((m_tile_N3 * (m_tile_N2 * (m_tile_N1 * (i0 >> SHIFT_0) +
+                                          (i1 >> SHIFT_1)) +
+                             (i2 >> SHIFT_2)) +
+                (i3 >> SHIFT_3))
+               << SHIFT_4T);
+
+    auto local_offset =
+        (inner_pattern == Kokkos::Iterate::Left)
+            ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0) +
+               ((i2 & MASK_2) << (SHIFT_0 + SHIFT_1)) +
+               ((i3 & MASK_3) << (SHIFT_0 + SHIFT_1 + SHIFT_2)))
+            : (((i0 & MASK_0) << (SHIFT_3 + SHIFT_2 + SHIFT_1)) +
+               ((i1 & MASK_1) << (SHIFT_3 + SHIFT_2)) +
+               ((i2 & MASK_2) << (SHIFT_3)) + (i3 & MASK_3));
+
+    return tile_offset + local_offset;
+  }
+
+  // Rank 5
+  template <typename I0, typename I1, typename I2, typename I3, typename I4>
+  KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0, I1 const& i1,
+                                              I2 const& i2, I3 const& i3,
+                                              I4 const& i4) const {
+    auto tile_offset =
+        (outer_pattern == Kokkos::Iterate::Left)
+            ? (((i0 >> SHIFT_0) +
+                m_tile_N0 *
+                    ((i1 >> SHIFT_1) +
+                     m_tile_N1 * ((i2 >> SHIFT_2) +
+                                  m_tile_N2 * ((i3 >> SHIFT_3) +
+                                               m_tile_N3 * (i4 >> SHIFT_4)))))
+               << SHIFT_5T)
+            : ((m_tile_N4 *
+                    (m_tile_N3 * (m_tile_N2 * (m_tile_N1 * (i0 >> SHIFT_0) +
+                                               (i1 >> SHIFT_1)) +
+                                  (i2 >> SHIFT_2)) +
+                     (i3 >> SHIFT_3)) +
+                (i4 >> SHIFT_4))
+               << SHIFT_5T);
+
+    auto local_offset =
+        (inner_pattern == Kokkos::Iterate::Left)
+            ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0) +
+               ((i2 & MASK_2) << (SHIFT_0 + SHIFT_1)) +
+               ((i3 & MASK_3) << (SHIFT_0 + SHIFT_1 + SHIFT_2)) +
+               ((i4 & MASK_4) << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3)))
+            : (((i0 & MASK_0) << (SHIFT_4 + SHIFT_3 + SHIFT_2 + SHIFT_1)) +
+               ((i1 & MASK_1) << (SHIFT_4 + SHIFT_3 + SHIFT_2)) +
+               ((i2 & MASK_2) << (SHIFT_4 + SHIFT_3)) +
+               ((i3 & MASK_3) << (SHIFT_4)) + (i4 & MASK_4));
+
+    return tile_offset + local_offset;
+  }
+
+  // Rank 6
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5>
+  KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0, I1 const& i1,
+                                              I2 const& i2, I3 const& i3,
+                                              I4 const& i4,
+                                              I5 const& i5) const {
+    auto tile_offset =
+        (outer_pattern == Kokkos::Iterate::Left)
+            ? (((i0 >> SHIFT_0) +
+                m_tile_N0 *
+                    ((i1 >> SHIFT_1) +
+                     m_tile_N1 *
+                         ((i2 >> SHIFT_2) +
+                          m_tile_N2 *
+                              ((i3 >> SHIFT_3) +
+                               m_tile_N3 * ((i4 >> SHIFT_4) +
+                                            m_tile_N4 * (i5 >> SHIFT_5))))))
+               << SHIFT_6T)
+            : ((m_tile_N5 *
+                    (m_tile_N4 *
+                         (m_tile_N3 *
+                              (m_tile_N2 * (m_tile_N1 * (i0 >> SHIFT_0) +
+                                            (i1 >> SHIFT_1)) +
+                               (i2 >> SHIFT_2)) +
+                          (i3 >> SHIFT_3)) +
+                     (i4 >> SHIFT_4)) +
+                (i5 >> SHIFT_5))
+               << SHIFT_6T);
+
+    auto local_offset =
+        (inner_pattern == Kokkos::Iterate::Left)
+            ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0) +
+               ((i2 & MASK_2) << (SHIFT_0 + SHIFT_1)) +
+               ((i3 & MASK_3) << (SHIFT_0 + SHIFT_1 + SHIFT_2)) +
+               ((i4 & MASK_4) << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3)) +
+               ((i5 & MASK_5)
+                << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4)))
+            : (((i0 & MASK_0)
+                << (SHIFT_5 + SHIFT_4 + SHIFT_3 + SHIFT_2 + SHIFT_1)) +
+               ((i1 & MASK_1) << (SHIFT_5 + SHIFT_4 + SHIFT_3 + SHIFT_2)) +
+               ((i2 & MASK_2) << (SHIFT_5 + SHIFT_4 + SHIFT_3)) +
+               ((i3 & MASK_3) << (SHIFT_5 + SHIFT_4)) +
+               ((i4 & MASK_4) << (SHIFT_5)) + (i5 & MASK_5));
+
+    return tile_offset + local_offset;
+  }
+
+  // Rank 7
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6>
+  KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0, I1 const& i1,
+                                              I2 const& i2, I3 const& i3,
+                                              I4 const& i4, I5 const& i5,
+                                              I6 const& i6) const {
+    auto tile_offset =
+        (outer_pattern == Kokkos::Iterate::Left)
+            ? (((i0 >> SHIFT_0) +
+                m_tile_N0 *
+                    ((i1 >> SHIFT_1) +
+                     m_tile_N1 *
+                         ((i2 >> SHIFT_2) +
+                          m_tile_N2 *
+                              ((i3 >> SHIFT_3) +
+                               m_tile_N3 *
+                                   ((i4 >> SHIFT_4) +
+                                    m_tile_N4 *
+                                        ((i5 >> SHIFT_5) +
+                                         m_tile_N5 * (i6 >> SHIFT_6)))))))
+               << SHIFT_7T)
+            : ((m_tile_N6 *
+                    (m_tile_N5 *
+                         (m_tile_N4 *
+                              (m_tile_N3 *
+                                   (m_tile_N2 * (m_tile_N1 * (i0 >> SHIFT_0) +
+                                                 (i1 >> SHIFT_1)) +
+                                    (i2 >> SHIFT_2)) +
+                               (i3 >> SHIFT_3)) +
+                          (i4 >> SHIFT_4)) +
+                     (i5 >> SHIFT_5)) +
+                (i6 >> SHIFT_6))
+               << SHIFT_7T);
+
+    auto local_offset =
+        (inner_pattern == Kokkos::Iterate::Left)
+            ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0) +
+               ((i2 & MASK_2) << (SHIFT_0 + SHIFT_1)) +
+               ((i3 & MASK_3) << (SHIFT_0 + SHIFT_1 + SHIFT_2)) +
+               ((i4 & MASK_4) << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3)) +
+               ((i5 & MASK_5)
+                << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4)) +
+               ((i6 & MASK_6)
+                << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4 + SHIFT_5)))
+            : (((i0 & MASK_0) << (SHIFT_6 + SHIFT_5 + SHIFT_4 + SHIFT_3 +
+                                  SHIFT_2 + SHIFT_1)) +
+               ((i1 & MASK_1)
+                << (SHIFT_6 + SHIFT_5 + SHIFT_4 + SHIFT_3 + SHIFT_2)) +
+               ((i2 & MASK_2) << (SHIFT_6 + SHIFT_5 + SHIFT_4 + SHIFT_3)) +
+               ((i3 & MASK_3) << (SHIFT_6 + SHIFT_5 + SHIFT_4)) +
+               ((i4 & MASK_4) << (SHIFT_6 + SHIFT_5)) +
+               ((i5 & MASK_5) << (SHIFT_6)) + (i6 & MASK_6));
+
+    return tile_offset + local_offset;
+  }
+
+  // Rank 8
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename I7>
+  KOKKOS_INLINE_FUNCTION size_type operator()(I0 const& i0, I1 const& i1,
+                                              I2 const& i2, I3 const& i3,
+                                              I4 const& i4, I5 const& i5,
+                                              I6 const& i6,
+                                              I7 const& i7) const {
+    auto tile_offset =
+        (outer_pattern == Kokkos::Iterate::Left)
+            ? (((i0 >> SHIFT_0) +
+                m_tile_N0 *
+                    ((i1 >> SHIFT_1) +
+                     m_tile_N1 *
+                         ((i2 >> SHIFT_2) +
+                          m_tile_N2 *
+                              ((i3 >> SHIFT_3) +
+                               m_tile_N3 *
+                                   ((i4 >> SHIFT_4) +
+                                    m_tile_N4 *
+                                        ((i5 >> SHIFT_5) +
+                                         m_tile_N5 *
+                                             ((i6 >> SHIFT_6) +
+                                              m_tile_N6 * (i7 >> SHIFT_7))))))))
+               << SHIFT_8T)
+            : ((m_tile_N7 *
+                    (m_tile_N6 *
+                         (m_tile_N5 *
+                              (m_tile_N4 *
+                                   (m_tile_N3 *
+                                        (m_tile_N2 *
+                                             (m_tile_N1 * (i0 >> SHIFT_0) +
+                                              (i1 >> SHIFT_1)) +
+                                         (i2 >> SHIFT_2)) +
+                                    (i3 >> SHIFT_3)) +
+                               (i4 >> SHIFT_4)) +
+                          (i5 >> SHIFT_5)) +
+                     (i6 >> SHIFT_6)) +
+                (i7 >> SHIFT_7))
+               << SHIFT_8T);
+
+    auto local_offset =
+        (inner_pattern == Kokkos::Iterate::Left)
+            ? ((i0 & MASK_0) + ((i1 & MASK_1) << SHIFT_0) +
+               ((i2 & MASK_2) << (SHIFT_0 + SHIFT_1)) +
+               ((i3 & MASK_3) << (SHIFT_0 + SHIFT_1 + SHIFT_2)) +
+               ((i4 & MASK_4) << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3)) +
+               ((i5 & MASK_5)
+                << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 + SHIFT_4)) +
+               ((i6 & MASK_6) << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 +
+                                  SHIFT_4 + SHIFT_5)) +
+               ((i7 & MASK_7) << (SHIFT_0 + SHIFT_1 + SHIFT_2 + SHIFT_3 +
+                                  SHIFT_4 + SHIFT_5 + SHIFT_6)))
+            : (((i0 & MASK_0) << (SHIFT_7 + SHIFT_6 + SHIFT_5 + SHIFT_4 +
+                                  SHIFT_3 + SHIFT_2 + SHIFT_1)) +
+               ((i1 & MASK_1) << (SHIFT_7 + SHIFT_6 + SHIFT_5 + SHIFT_4 +
+                                  SHIFT_3 + SHIFT_2)) +
+               ((i2 & MASK_2)
+                << (SHIFT_7 + SHIFT_6 + SHIFT_5 + SHIFT_4 + SHIFT_3)) +
+               ((i3 & MASK_3) << (SHIFT_7 + SHIFT_6 + SHIFT_5 + SHIFT_4)) +
+               ((i4 & MASK_4) << (SHIFT_7 + SHIFT_6 + SHIFT_5)) +
+               ((i5 & MASK_5) << (SHIFT_7 + SHIFT_6)) +
+               ((i6 & MASK_6) << (SHIFT_7)) + (i7 & MASK_7));
+
+    return tile_offset + local_offset;
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION constexpr array_layout layout() const {
+    return array_layout((VORank > 0 ? m_dim.N0 : KOKKOS_INVALID_INDEX),
+                        (VORank > 1 ? m_dim.N1 : KOKKOS_INVALID_INDEX),
+                        (VORank > 2 ? m_dim.N2 : KOKKOS_INVALID_INDEX),
+                        (VORank > 3 ? m_dim.N3 : KOKKOS_INVALID_INDEX),
+                        (VORank > 4 ? m_dim.N4 : KOKKOS_INVALID_INDEX),
+                        (VORank > 5 ? m_dim.N5 : KOKKOS_INVALID_INDEX),
+                        (VORank > 6 ? m_dim.N6 : KOKKOS_INVALID_INDEX),
+                        (VORank > 7 ? m_dim.N7 : KOKKOS_INVALID_INDEX));
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_0() const {
+    return m_dim.N0;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_1() const {
+    return m_dim.N1;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_2() const {
+    return m_dim.N2;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_3() const {
+    return m_dim.N3;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_4() const {
+    return m_dim.N4;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_5() const {
+    return m_dim.N5;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_6() const {
+    return m_dim.N6;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_7() const {
+    return m_dim.N7;
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_type size() const {
+    return m_dim.N0 * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4 * m_dim.N5 *
+           m_dim.N6 * m_dim.N7;
+  }
+
+  // Strides are meaningless due to irregularity
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_0() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_1() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_2() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_3() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_4() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_5() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_6() const { return 0; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_7() const { return 0; }
+
+  // Stride with [ rank ] value would be the total length for a regular
+  // layout; here every entry is zeroed since the tiled layout has no uniform
+  // strides.
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+    s[0] = 0;
+    if (0 < dimension_type::rank) {
+      s[1] = 0;
+    }
+    if (1 < dimension_type::rank) {
+      s[2] = 0;
+    }
+    if (2 < dimension_type::rank) {
+      s[3] = 0;
+    }
+    if (3 < dimension_type::rank) {
+      s[4] = 0;
+    }
+    if (4 < dimension_type::rank) {
+      s[5] = 0;
+    }
+    if (5 < dimension_type::rank) {
+      s[6] = 0;
+    }
+    if (6 < dimension_type::rank) {
+      s[7] = 0;
+    }
+    if (7 < dimension_type::rank) {
+      s[8] = 0;
+    }
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_type span() const {
+    // Rank2: ( NumTile0 * ( NumTile1 ) ) * TileSize, etc
+    return (VORank == 2)
+               ? (m_tile_N0 * m_tile_N1) << SHIFT_2T
+               : (VORank == 3)
+                     ? (m_tile_N0 * m_tile_N1 * m_tile_N2) << SHIFT_3T
+                     : (VORank == 4)
+                           ? (m_tile_N0 * m_tile_N1 * m_tile_N2 * m_tile_N3)
+                                 << SHIFT_4T
+                           : (VORank == 5)
+                                 ? (m_tile_N0 * m_tile_N1 * m_tile_N2 *
+                                    m_tile_N3 * m_tile_N4)
+                                       << SHIFT_5T
+                                 : (VORank == 6)
+                                       ? (m_tile_N0 * m_tile_N1 * m_tile_N2 *
+                                          m_tile_N3 * m_tile_N4 * m_tile_N5)
+                                             << SHIFT_6T
+                                       : (VORank == 7)
+                                             ? (m_tile_N0 * m_tile_N1 *
+                                                m_tile_N2 * m_tile_N3 *
+                                                m_tile_N4 * m_tile_N5 *
+                                                m_tile_N6)
+                                                   << SHIFT_7T
+                                             : (m_tile_N0 * m_tile_N1 *
+                                                m_tile_N2 * m_tile_N3 *
+                                                m_tile_N4 * m_tile_N5 *
+                                                m_tile_N6 * m_tile_N7)
+                                                   << SHIFT_8T;
+  }
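+
+  // Illustrative span computation (assumed extents): a rank-2 view of extents
+  // 10x10 with 4x4 tiles has m_tile_N0 = m_tile_N1 = 3 and SHIFT_2T = 4, so
+  // span() = (3 * 3) << 4 = 144 elements. Each dimension is padded up to a
+  // whole number of tiles, which is why span() can exceed size() = 100.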
+
+  KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
+    return true;
+  }
+
+  //----------------------------------------
+#ifdef KOKKOS_IMPL_WINDOWS_CUDA
+  KOKKOS_FUNCTION ViewOffset() {}
+  KOKKOS_FUNCTION ViewOffset(const ViewOffset& src) {
+    m_dim     = src.m_dim;
+    m_tile_N0 = src.m_tile_N0;
+    m_tile_N1 = src.m_tile_N1;
+    m_tile_N2 = src.m_tile_N2;
+    m_tile_N3 = src.m_tile_N3;
+    m_tile_N4 = src.m_tile_N4;
+    m_tile_N5 = src.m_tile_N5;
+    m_tile_N6 = src.m_tile_N6;
+    m_tile_N7 = src.m_tile_N7;
+  }
+  KOKKOS_FUNCTION ViewOffset& operator=(const ViewOffset& src) {
+    m_dim     = src.m_dim;
+    m_tile_N0 = src.m_tile_N0;
+    m_tile_N1 = src.m_tile_N1;
+    m_tile_N2 = src.m_tile_N2;
+    m_tile_N3 = src.m_tile_N3;
+    m_tile_N4 = src.m_tile_N4;
+    m_tile_N5 = src.m_tile_N5;
+    m_tile_N6 = src.m_tile_N6;
+    m_tile_N7 = src.m_tile_N7;
+    return *this;
+  }
+#else
+  KOKKOS_DEFAULTED_FUNCTION ~ViewOffset()                 = default;
+  KOKKOS_DEFAULTED_FUNCTION ViewOffset()                  = default;
+  KOKKOS_DEFAULTED_FUNCTION ViewOffset(const ViewOffset&) = default;
+  KOKKOS_DEFAULTED_FUNCTION ViewOffset& operator=(const ViewOffset&) = default;
+#endif
+
+  template <unsigned TrivialScalarSize>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      std::integral_constant<unsigned, TrivialScalarSize> const&,
+      array_layout const arg_layout)
+      : m_dim(arg_layout.dimension[0], arg_layout.dimension[1],
+              arg_layout.dimension[2], arg_layout.dimension[3],
+              arg_layout.dimension[4], arg_layout.dimension[5],
+              arg_layout.dimension[6], arg_layout.dimension[7]),
+        m_tile_N0((arg_layout.dimension[0] + MASK_0) >>
+                  SHIFT_0 /* number of tiles in first dimension */),
+        m_tile_N1((arg_layout.dimension[1] + MASK_1) >> SHIFT_1),
+        m_tile_N2((VORank > 2) ? (arg_layout.dimension[2] + MASK_2) >> SHIFT_2
+                               : 0),
+        m_tile_N3((VORank > 3) ? (arg_layout.dimension[3] + MASK_3) >> SHIFT_3
+                               : 0),
+        m_tile_N4((VORank > 4) ? (arg_layout.dimension[4] + MASK_4) >> SHIFT_4
+                               : 0),
+        m_tile_N5((VORank > 5) ? (arg_layout.dimension[5] + MASK_5) >> SHIFT_5
+                               : 0),
+        m_tile_N6((VORank > 6) ? (arg_layout.dimension[6] + MASK_6) >> SHIFT_6
+                               : 0),
+        m_tile_N7((VORank > 7) ? (arg_layout.dimension[7] + MASK_7) >> SHIFT_7
+                               : 0) {}
+};
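+
+// Note (illustrative arithmetic): the (dimension + MASK) >> SHIFT expressions
+// in the constructor above are a ceiling division by the tile extent, e.g. a
+// dimension of 10 with 4-wide tiles gives (10 + 3) >> 2 = 3 tiles, the last
+// one only partially filled.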
+
+// FIXME Remove the out-of-class definitions when we require C++17
+#define KOKKOS_ITERATE_VIEW_OFFSET_ENABLE                               \
+  std::enable_if_t<((Dimension::rank <= 8) && (Dimension::rank >= 2) && \
+                    is_array_layout<Layout>::value &&                   \
+                    is_array_layout_tiled<Layout>::value)>
+template <class Dimension, class Layout>
+constexpr Kokkos::Iterate ViewOffset<
+    Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::outer_pattern;
+template <class Dimension, class Layout>
+constexpr Kokkos::Iterate ViewOffset<
+    Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::inner_pattern;
+template <class Dimension, class Layout>
+constexpr int
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::VORank;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_0;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_1;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_2;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_3;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_4;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_5;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_6;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_7;
+template <class Dimension, class Layout>
+constexpr int
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_0;
+template <class Dimension, class Layout>
+constexpr int
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_1;
+template <class Dimension, class Layout>
+constexpr int
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_2;
+template <class Dimension, class Layout>
+constexpr int
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_3;
+template <class Dimension, class Layout>
+constexpr int
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_4;
+template <class Dimension, class Layout>
+constexpr int
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_5;
+template <class Dimension, class Layout>
+constexpr int
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_6;
+template <class Dimension, class Layout>
+constexpr int
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::MASK_7;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_2T;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_3T;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_4T;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_5T;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_6T;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_7T;
+template <class Dimension, class Layout>
+constexpr unsigned
+    ViewOffset<Dimension, Layout, KOKKOS_ITERATE_VIEW_OFFSET_ENABLE>::SHIFT_8T;
+#undef KOKKOS_ITERATE_VIEW_OFFSET_ENABLE
+
+//----------------------------------------
+
+// ViewMapping assign method needed in order to return a 'subview' tile as a
+// proper View. The outer iteration pattern determines the mapping of the
+// pointer offset to the beginning of the requested tile; the inner iteration
+// pattern determines the layout of the tile's View to be returned.
+
+// Rank 2
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
+          typename iType1>
+class ViewMapping<std::enable_if_t<(N2 == 0 && N3 == 0 && N4 == 0 && N5 == 0 &&
+                                    N6 == 0 && N7 == 0)>  // void
+                  ,
+                  Kokkos::ViewTraits<
+                      T**,
+                      Kokkos::Experimental::LayoutTiled<
+                          OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
+                      P...>,
+                  Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2,
+                                                    N3, N4, N5, N6, N7, true>,
+                  iType0, iType1> {
+ public:
+  using src_layout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+  using src_traits = Kokkos::ViewTraits<T**, src_layout, P...>;
+
+  static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
+  static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
+  using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
+                                          Kokkos::LayoutRight>;
+  using traits       = Kokkos::ViewTraits<T[N0][N1], array_layout, P...>;
+  using type         = Kokkos::View<T[N0][N1], array_layout, P...>;
+
+  KOKKOS_INLINE_FUNCTION static void assign(
+      ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
+      const src_layout&, const iType0 i_tile0, const iType1 i_tile1) {
+    using dst_map_type    = ViewMapping<traits, void>;
+    using src_map_type    = ViewMapping<src_traits, void>;
+    using dst_handle_type = typename dst_map_type::handle_type;
+    using dst_offset_type = typename dst_map_type::offset_type;
+    using src_offset_type = typename src_map_type::offset_type;
+
+    dst = dst_map_type(
+        dst_handle_type(
+            src.m_impl_handle +
+            (is_outer_left ? ((i_tile0 + src.m_impl_offset.m_tile_N0 * i_tile1)
+                              << src_offset_type::SHIFT_2T)
+                           : ((src.m_impl_offset.m_tile_N1 * i_tile0 + i_tile1)
+                              << src_offset_type::SHIFT_2T))  // offset to start
+                                                              // of the tile
+            ),
+        dst_offset_type());
+  }
+};
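+
+// Illustrative: for the rank-2 case above with 4x4 tiles, outer pattern
+// Iterate::Left, and src.m_impl_offset.m_tile_N0 == 2, requesting tile
+// (i_tile0, i_tile1) = (1, 1) points the destination handle at
+// src.m_impl_handle + ((1 + 2 * 1) << 4), i.e. 48 elements past the base, and
+// the result is viewed as a 4x4 LayoutLeft (or LayoutRight) tile.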
+
+// Rank 3
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
+          typename iType1, typename iType2>
+class ViewMapping<std::enable_if_t<(N3 == 0 && N4 == 0 && N5 == 0 && N6 == 0 &&
+                                    N7 == 0)>  // void
+                  ,
+                  Kokkos::ViewTraits<
+                      T***,
+                      Kokkos::Experimental::LayoutTiled<
+                          OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
+                      P...>,
+                  Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2,
+                                                    N3, N4, N5, N6, N7, true>,
+                  iType0, iType1, iType2> {
+ public:
+  using src_layout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+  using src_traits = Kokkos::ViewTraits<T***, src_layout, P...>;
+
+  static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
+  static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
+  using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
+                                          Kokkos::LayoutRight>;
+  using traits       = Kokkos::ViewTraits<T[N0][N1][N2], array_layout, P...>;
+  using type         = Kokkos::View<T[N0][N1][N2], array_layout, P...>;
+
+  KOKKOS_INLINE_FUNCTION static void assign(
+      ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
+      const src_layout&, const iType0 i_tile0, const iType1 i_tile1,
+      const iType2 i_tile2) {
+    using dst_map_type    = ViewMapping<traits, void>;
+    using src_map_type    = ViewMapping<src_traits, void>;
+    using dst_handle_type = typename dst_map_type::handle_type;
+    using dst_offset_type = typename dst_map_type::offset_type;
+    using src_offset_type = typename src_map_type::offset_type;
+
+    dst = dst_map_type(
+        dst_handle_type(
+            src.m_impl_handle +
+            (is_outer_left
+                 ? ((i_tile0 +
+                     src.m_impl_offset.m_tile_N0 *
+                         (i_tile1 + src.m_impl_offset.m_tile_N1 * i_tile2))
+                    << src_offset_type::SHIFT_3T)
+                 : ((src.m_impl_offset.m_tile_N2 *
+                         (src.m_impl_offset.m_tile_N1 * i_tile0 + i_tile1) +
+                     i_tile2)
+                    << src_offset_type::SHIFT_3T)))  // offset to start of the
+                                                     // tile
+        ,
+        dst_offset_type());
+  }
+};
+
+// Rank 4
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
+          typename iType1, typename iType2, typename iType3>
+class ViewMapping<
+    std::enable_if_t<(N4 == 0 && N5 == 0 && N6 == 0 && N7 == 0)>  // void
+    ,
+    Kokkos::ViewTraits<
+        T****,
+        Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4,
+                                          N5, N6, N7, true>,
+        P...>,
+    Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                      N6, N7, true>,
+    iType0, iType1, iType2, iType3> {
+ public:
+  using src_layout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+  using src_traits = Kokkos::ViewTraits<T****, src_layout, P...>;
+
+  static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
+  static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
+  using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
+                                          Kokkos::LayoutRight>;
+  using traits = Kokkos::ViewTraits<T[N0][N1][N2][N3], array_layout, P...>;
+  using type   = Kokkos::View<T[N0][N1][N2][N3], array_layout, P...>;
+
+  KOKKOS_INLINE_FUNCTION static void assign(
+      ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
+      const src_layout&, const iType0 i_tile0, const iType1 i_tile1,
+      const iType2 i_tile2, const iType3 i_tile3) {
+    using dst_map_type    = ViewMapping<traits, void>;
+    using src_map_type    = ViewMapping<src_traits, void>;
+    using dst_handle_type = typename dst_map_type::handle_type;
+    using dst_offset_type = typename dst_map_type::offset_type;
+    using src_offset_type = typename src_map_type::offset_type;
+
+    dst = dst_map_type(
+        dst_handle_type(
+            src.m_impl_handle +
+            (is_outer_left
+                 ? ((i_tile0 +
+                     src.m_impl_offset.m_tile_N0 *
+                         (i_tile1 + src.m_impl_offset.m_tile_N1 *
+                                        (i_tile2 + src.m_impl_offset.m_tile_N2 *
+                                                       i_tile3)))
+                    << src_offset_type::SHIFT_4T)
+                 : ((src.m_impl_offset.m_tile_N3 *
+                         (src.m_impl_offset.m_tile_N2 *
+                              (src.m_impl_offset.m_tile_N1 * i_tile0 +
+                               i_tile1) +
+                          i_tile2) +
+                     i_tile3)
+                    << src_offset_type::SHIFT_4T)))  // offset to start of the
+                                                     // tile
+        ,
+        dst_offset_type());
+  }
+};
+
+// Rank 5
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
+          typename iType1, typename iType2, typename iType3, typename iType4>
+class ViewMapping<std::enable_if_t<(N5 == 0 && N6 == 0 && N7 == 0)>  // void
+                  ,
+                  Kokkos::ViewTraits<
+                      T*****,
+                      Kokkos::Experimental::LayoutTiled<
+                          OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
+                      P...>,
+                  Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2,
+                                                    N3, N4, N5, N6, N7, true>,
+                  iType0, iType1, iType2, iType3, iType4> {
+ public:
+  using src_layout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+  using src_traits = Kokkos::ViewTraits<T*****, src_layout, P...>;
+
+  static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
+  static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
+  using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
+                                          Kokkos::LayoutRight>;
+  using traits = Kokkos::ViewTraits<T[N0][N1][N2][N3][N4], array_layout, P...>;
+  using type   = Kokkos::View<T[N0][N1][N2][N3][N4], array_layout, P...>;
+
+  KOKKOS_INLINE_FUNCTION static void assign(
+      ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
+      const src_layout&, const iType0 i_tile0, const iType1 i_tile1,
+      const iType2 i_tile2, const iType3 i_tile3, const iType4 i_tile4) {
+    using dst_map_type    = ViewMapping<traits, void>;
+    using src_map_type    = ViewMapping<src_traits, void>;
+    using dst_handle_type = typename dst_map_type::handle_type;
+    using dst_offset_type = typename dst_map_type::offset_type;
+    using src_offset_type = typename src_map_type::offset_type;
+
+    dst = dst_map_type(
+        dst_handle_type(
+            src.m_impl_handle +
+            (is_outer_left
+                 ? ((i_tile0 +
+                     src.m_impl_offset.m_tile_N0 *
+                         (i_tile1 +
+                          src.m_impl_offset.m_tile_N1 *
+                              (i_tile2 +
+                               src.m_impl_offset.m_tile_N2 *
+                                   (i_tile3 +
+                                    src.m_impl_offset.m_tile_N3 * i_tile4))))
+                    << src_offset_type::SHIFT_5T)
+                 : ((src.m_impl_offset.m_tile_N4 *
+                         (src.m_impl_offset.m_tile_N3 *
+                              (src.m_impl_offset.m_tile_N2 *
+                                   (src.m_impl_offset.m_tile_N1 * i_tile0 +
+                                    i_tile1) +
+                               i_tile2) +
+                          i_tile3) +
+                     i_tile4)
+                    << src_offset_type::SHIFT_5T)))  // offset to start of the
+                                                     // tile
+        ,
+        dst_offset_type());
+  }
+};
+
+// Rank 6
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
+          typename iType1, typename iType2, typename iType3, typename iType4,
+          typename iType5>
+class ViewMapping<std::enable_if_t<(N6 == 0 && N7 == 0)>  // void
+                  ,
+                  Kokkos::ViewTraits<
+                      T******,
+                      Kokkos::Experimental::LayoutTiled<
+                          OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
+                      P...>,
+                  Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2,
+                                                    N3, N4, N5, N6, N7, true>,
+                  iType0, iType1, iType2, iType3, iType4, iType5> {
+ public:
+  using src_layout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+  using src_traits = Kokkos::ViewTraits<T******, src_layout, P...>;
+
+  static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
+  static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
+  using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
+                                          Kokkos::LayoutRight>;
+  using traits =
+      Kokkos::ViewTraits<T[N0][N1][N2][N3][N4][N5], array_layout, P...>;
+  using type = Kokkos::View<T[N0][N1][N2][N3][N4][N5], array_layout, P...>;
+
+  KOKKOS_INLINE_FUNCTION static void assign(
+      ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
+      const src_layout&, const iType0 i_tile0, const iType1 i_tile1,
+      const iType2 i_tile2, const iType3 i_tile3, const iType4 i_tile4,
+      const iType5 i_tile5) {
+    using dst_map_type    = ViewMapping<traits, void>;
+    using src_map_type    = ViewMapping<src_traits, void>;
+    using dst_handle_type = typename dst_map_type::handle_type;
+    using dst_offset_type = typename dst_map_type::offset_type;
+    using src_offset_type = typename src_map_type::offset_type;
+
+    dst = dst_map_type(
+        dst_handle_type(
+            src.m_impl_handle +
+            (is_outer_left
+                 ? ((i_tile0 +
+                     src.m_impl_offset.m_tile_N0 *
+                         (i_tile1 +
+                          src.m_impl_offset.m_tile_N1 *
+                              (i_tile2 +
+                               src.m_impl_offset.m_tile_N2 *
+                                   (i_tile3 +
+                                    src.m_impl_offset.m_tile_N3 *
+                                        (i_tile4 + src.m_impl_offset.m_tile_N4 *
+                                                       i_tile5)))))
+                    << src_offset_type::SHIFT_6T)
+                 : ((src.m_impl_offset.m_tile_N5 *
+                         (src.m_impl_offset.m_tile_N4 *
+                              (src.m_impl_offset.m_tile_N3 *
+                                   (src.m_impl_offset.m_tile_N2 *
+                                        (src.m_impl_offset.m_tile_N1 * i_tile0 +
+                                         i_tile1) +
+                                    i_tile2) +
+                               i_tile3) +
+                          i_tile4) +
+                     i_tile5)
+                    << src_offset_type::SHIFT_6T)))  // offset to start of the
+                                                     // tile
+        ,
+        dst_offset_type());
+  }
+};
+
+// Rank 7
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
+          typename iType1, typename iType2, typename iType3, typename iType4,
+          typename iType5, typename iType6>
+class ViewMapping<std::enable_if_t<(N7 == 0)>  // void
+                  ,
+                  Kokkos::ViewTraits<
+                      T*******,
+                      Kokkos::Experimental::LayoutTiled<
+                          OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
+                      P...>,
+                  Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2,
+                                                    N3, N4, N5, N6, N7, true>,
+                  iType0, iType1, iType2, iType3, iType4, iType5, iType6> {
+ public:
+  using src_layout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+  using src_traits = Kokkos::ViewTraits<T*******, src_layout, P...>;
+
+  static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
+  static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
+  using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
+                                          Kokkos::LayoutRight>;
+  using traits =
+      Kokkos::ViewTraits<T[N0][N1][N2][N3][N4][N5][N6], array_layout, P...>;
+  using type = Kokkos::View<T[N0][N1][N2][N3][N4][N5][N6], array_layout, P...>;
+
+  KOKKOS_INLINE_FUNCTION static void assign(
+      ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
+      const src_layout&, const iType0 i_tile0, const iType1 i_tile1,
+      const iType2 i_tile2, const iType3 i_tile3, const iType4 i_tile4,
+      const iType5 i_tile5, const iType6 i_tile6) {
+    using dst_map_type    = ViewMapping<traits, void>;
+    using src_map_type    = ViewMapping<src_traits, void>;
+    using dst_handle_type = typename dst_map_type::handle_type;
+    using dst_offset_type = typename dst_map_type::offset_type;
+    using src_offset_type = typename src_map_type::offset_type;
+
+    dst = dst_map_type(
+        dst_handle_type(
+            src.m_impl_handle +
+            (is_outer_left
+                 ? ((i_tile0 +
+                     src.m_impl_offset.m_tile_N0 *
+                         (i_tile1 +
+                          src.m_impl_offset.m_tile_N1 *
+                              (i_tile2 +
+                               src.m_impl_offset.m_tile_N2 *
+                                   (i_tile3 +
+                                    src.m_impl_offset.m_tile_N3 *
+                                        (i_tile4 +
+                                         src.m_impl_offset.m_tile_N4 *
+                                             (i_tile5 +
+                                              src.m_impl_offset.m_tile_N5 *
+                                                  i_tile6))))))
+                    << src_offset_type::SHIFT_7T)
+                 : ((src.m_impl_offset.m_tile_N6 *
+                         (src.m_impl_offset.m_tile_N5 *
+                              (src.m_impl_offset.m_tile_N4 *
+                                   (src.m_impl_offset.m_tile_N3 *
+                                        (src.m_impl_offset.m_tile_N2 *
+                                             (src.m_impl_offset.m_tile_N1 *
+                                                  i_tile0 +
+                                              i_tile1) +
+                                         i_tile2) +
+                                    i_tile3) +
+                               i_tile4) +
+                          i_tile5) +
+                     i_tile6)
+                    << src_offset_type::SHIFT_7T)))  // offset to start of the
+                                                     // tile
+        ,
+        dst_offset_type());
+  }
+};
+
+// Rank 8
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P, typename iType0,
+          typename iType1, typename iType2, typename iType3, typename iType4,
+          typename iType5, typename iType6, typename iType7>
+class ViewMapping<
+    std::enable_if_t<(N0 != 0 && N1 != 0 && N2 != 0 && N3 != 0 && N4 != 0 &&
+                      N5 != 0 && N6 != 0 && N7 != 0)>  // void
+    ,
+    Kokkos::ViewTraits<
+        T********,
+        Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4,
+                                          N5, N6, N7, true>,
+        P...>,
+    Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                      N6, N7, true>,
+    iType0, iType1, iType2, iType3, iType4, iType5, iType6, iType7> {
+ public:
+  using src_layout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+  using src_traits = Kokkos::ViewTraits<T********, src_layout, P...>;
+
+  static constexpr bool is_outer_left = (OuterP == Kokkos::Iterate::Left);
+  static constexpr bool is_inner_left = (InnerP == Kokkos::Iterate::Left);
+  using array_layout = std::conditional_t<is_inner_left, Kokkos::LayoutLeft,
+                                          Kokkos::LayoutRight>;
+  using traits =
+      Kokkos::ViewTraits<T[N0][N1][N2][N3][N4][N5][N6][N7], array_layout, P...>;
+  using type =
+      Kokkos::View<T[N0][N1][N2][N3][N4][N5][N6][N7], array_layout, P...>;
+
+  KOKKOS_INLINE_FUNCTION static void assign(
+      ViewMapping<traits, void>& dst, const ViewMapping<src_traits, void>& src,
+      const src_layout&, const iType0 i_tile0, const iType1 i_tile1,
+      const iType2 i_tile2, const iType3 i_tile3, const iType4 i_tile4,
+      const iType5 i_tile5, const iType6 i_tile6, const iType7 i_tile7) {
+    using dst_map_type    = ViewMapping<traits, void>;
+    using src_map_type    = ViewMapping<src_traits, void>;
+    using dst_handle_type = typename dst_map_type::handle_type;
+    using dst_offset_type = typename dst_map_type::offset_type;
+    using src_offset_type = typename src_map_type::offset_type;
+
+    dst = dst_map_type(
+        dst_handle_type(
+            src.m_impl_handle +
+            (is_outer_left
+                 ? ((i_tile0 +
+                     src.m_impl_offset.m_tile_N0 *
+                         (i_tile1 +
+                          src.m_impl_offset.m_tile_N1 *
+                              (i_tile2 +
+                               src.m_impl_offset.m_tile_N2 *
+                                   (i_tile3 +
+                                    src.m_impl_offset.m_tile_N3 *
+                                        (i_tile4 +
+                                         src.m_impl_offset.m_tile_N4 *
+                                             (i_tile5 +
+                                              src.m_impl_offset.m_tile_N5 *
+                                                  (i_tile6 +
+                                                   src.m_impl_offset.m_tile_N6 *
+                                                       i_tile7)))))))
+                    << src_offset_type::SHIFT_8T)
+                 : ((src.m_impl_offset.m_tile_N7 *
+                         (src.m_impl_offset.m_tile_N6 *
+                              (src.m_impl_offset.m_tile_N5 *
+                                   (src.m_impl_offset.m_tile_N4 *
+                                        (src.m_impl_offset.m_tile_N3 *
+                                             (src.m_impl_offset.m_tile_N2 *
+                                                  (src.m_impl_offset.m_tile_N1 *
+                                                       i_tile0 +
+                                                   i_tile1) +
+                                              i_tile2) +
+                                         i_tile3) +
+                                    i_tile4) +
+                               i_tile5) +
+                          i_tile6) +
+                     i_tile7)
+                    << src_offset_type::SHIFT_8T)))  // offset to start of the
+                                                     // tile
+        ,
+        dst_offset_type());
+  }
+};
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------
+
+namespace Kokkos {
+
+// Rank 2
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P>
+KOKKOS_INLINE_FUNCTION
+    Kokkos::View<T[N0][N1],
+                 std::conditional_t<(InnerP == Kokkos::Iterate::Left),
+                                    Kokkos::LayoutLeft, Kokkos::LayoutRight>,
+                 P...>
+    tile_subview(const Kokkos::View<
+                     T**,
+                     Kokkos::Experimental::LayoutTiled<
+                         OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
+                     P...>& src,
+                 const size_t i_tile0, const size_t i_tile1) {
+  // Force the specialized ViewMapping for extracting a tile
+  // by using the first subview argument as the layout.
+  using array_layout =
+      std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
+                         Kokkos::LayoutRight>;
+  using SrcLayout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+
+  return Kokkos::View<T[N0][N1], array_layout, P...>(src, SrcLayout(), i_tile0,
+                                                     i_tile1);
+}
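+
+// An illustrative usage sketch for the rank-2 overload above (an assumed
+// example, not part of the interface; it presumes a 4x4 inner tile, and tile
+// extents must be powers of two):
+//
+//   using TiledLayout = Kokkos::Experimental::LayoutTiled<
+//       Kokkos::Iterate::Left, Kokkos::Iterate::Left, 4, 4>;
+//   Kokkos::View<double**, TiledLayout> v("v", 16, 16);
+//   // Extract the tile at tile coordinates (2, 3); the result is a
+//   // fixed-size View<double[4][4]> aliasing that tile's contiguous storage.
+//   auto tile = Kokkos::tile_subview(v, 2, 3);
+//   tile(1, 2) = 42.0;  // element (2*4 + 1, 3*4 + 2) of v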
+
+// Rank 3
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P>
+KOKKOS_INLINE_FUNCTION
+    Kokkos::View<T[N0][N1][N2],
+                 std::conditional_t<(InnerP == Kokkos::Iterate::Left),
+                                    Kokkos::LayoutLeft, Kokkos::LayoutRight>,
+                 P...>
+    tile_subview(const Kokkos::View<
+                     T***,
+                     Kokkos::Experimental::LayoutTiled<
+                         OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
+                     P...>& src,
+                 const size_t i_tile0, const size_t i_tile1,
+                 const size_t i_tile2) {
+  // Force the specialized ViewMapping for extracting a tile
+  // by using the first subview argument as the layout.
+  using array_layout =
+      std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
+                         Kokkos::LayoutRight>;
+  using SrcLayout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+
+  return Kokkos::View<T[N0][N1][N2], array_layout, P...>(
+      src, SrcLayout(), i_tile0, i_tile1, i_tile2);
+}
+
+// Rank 4
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P>
+KOKKOS_INLINE_FUNCTION
+    Kokkos::View<T[N0][N1][N2][N3],
+                 std::conditional_t<(InnerP == Kokkos::Iterate::Left),
+                                    Kokkos::LayoutLeft, Kokkos::LayoutRight>,
+                 P...>
+    tile_subview(const Kokkos::View<
+                     T****,
+                     Kokkos::Experimental::LayoutTiled<
+                         OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
+                     P...>& src,
+                 const size_t i_tile0, const size_t i_tile1,
+                 const size_t i_tile2, const size_t i_tile3) {
+  // Force the specialized ViewMapping for extracting a tile
+  // by using the first subview argument as the layout.
+  using array_layout =
+      std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
+                         Kokkos::LayoutRight>;
+  using SrcLayout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+
+  return Kokkos::View<T[N0][N1][N2][N3], array_layout, P...>(
+      src, SrcLayout(), i_tile0, i_tile1, i_tile2, i_tile3);
+}
+
+// Rank 5
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P>
+KOKKOS_INLINE_FUNCTION
+    Kokkos::View<T[N0][N1][N2][N3][N4],
+                 std::conditional_t<(InnerP == Kokkos::Iterate::Left),
+                                    Kokkos::LayoutLeft, Kokkos::LayoutRight>,
+                 P...>
+    tile_subview(const Kokkos::View<
+                     T*****,
+                     Kokkos::Experimental::LayoutTiled<
+                         OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
+                     P...>& src,
+                 const size_t i_tile0, const size_t i_tile1,
+                 const size_t i_tile2, const size_t i_tile3,
+                 const size_t i_tile4) {
+  // Force the specialized ViewMapping for extracting a tile
+  // by using the first subview argument as the layout.
+  using array_layout =
+      std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
+                         Kokkos::LayoutRight>;
+  using SrcLayout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+
+  return Kokkos::View<T[N0][N1][N2][N3][N4], array_layout, P...>(
+      src, SrcLayout(), i_tile0, i_tile1, i_tile2, i_tile3, i_tile4);
+}
+
+// Rank 6
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P>
+KOKKOS_INLINE_FUNCTION
+    Kokkos::View<T[N0][N1][N2][N3][N4][N5],
+                 std::conditional_t<(InnerP == Kokkos::Iterate::Left),
+                                    Kokkos::LayoutLeft, Kokkos::LayoutRight>,
+                 P...>
+    tile_subview(const Kokkos::View<
+                     T******,
+                     Kokkos::Experimental::LayoutTiled<
+                         OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
+                     P...>& src,
+                 const size_t i_tile0, const size_t i_tile1,
+                 const size_t i_tile2, const size_t i_tile3,
+                 const size_t i_tile4, const size_t i_tile5) {
+  // Force the specialized ViewMapping for extracting a tile
+  // by using the first subview argument as the layout.
+  using array_layout =
+      std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
+                         Kokkos::LayoutRight>;
+  using SrcLayout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+
+  return Kokkos::View<T[N0][N1][N2][N3][N4][N5], array_layout, P...>(
+      src, SrcLayout(), i_tile0, i_tile1, i_tile2, i_tile3, i_tile4, i_tile5);
+}
+
+// Rank 7
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P>
+KOKKOS_INLINE_FUNCTION
+    Kokkos::View<T[N0][N1][N2][N3][N4][N5][N6],
+                 std::conditional_t<(InnerP == Kokkos::Iterate::Left),
+                                    Kokkos::LayoutLeft, Kokkos::LayoutRight>,
+                 P...>
+    tile_subview(const Kokkos::View<
+                     T*******,
+                     Kokkos::Experimental::LayoutTiled<
+                         OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
+                     P...>& src,
+                 const size_t i_tile0, const size_t i_tile1,
+                 const size_t i_tile2, const size_t i_tile3,
+                 const size_t i_tile4, const size_t i_tile5,
+                 const size_t i_tile6) {
+  // Force the specialized ViewMapping for extracting a tile
+  // by using the first subview argument as the layout.
+  using array_layout =
+      std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
+                         Kokkos::LayoutRight>;
+  using SrcLayout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+
+  return Kokkos::View<T[N0][N1][N2][N3][N4][N5][N6], array_layout, P...>(
+      src, SrcLayout(), i_tile0, i_tile1, i_tile2, i_tile3, i_tile4, i_tile5,
+      i_tile6);
+}
+
+// Rank 8
+template <typename T, Kokkos::Iterate OuterP, Kokkos::Iterate InnerP,
+          unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
+          unsigned N5, unsigned N6, unsigned N7, class... P>
+KOKKOS_INLINE_FUNCTION
+    Kokkos::View<T[N0][N1][N2][N3][N4][N5][N6][N7],
+                 std::conditional_t<(InnerP == Kokkos::Iterate::Left),
+                                    Kokkos::LayoutLeft, Kokkos::LayoutRight>,
+                 P...>
+    tile_subview(const Kokkos::View<
+                     T********,
+                     Kokkos::Experimental::LayoutTiled<
+                         OuterP, InnerP, N0, N1, N2, N3, N4, N5, N6, N7, true>,
+                     P...>& src,
+                 const size_t i_tile0, const size_t i_tile1,
+                 const size_t i_tile2, const size_t i_tile3,
+                 const size_t i_tile4, const size_t i_tile5,
+                 const size_t i_tile6, const size_t i_tile7) {
+  // Force the specialized ViewMapping for extracting a tile
+  // by using the first subview argument as the layout.
+  using array_layout =
+      std::conditional_t<(InnerP == Kokkos::Iterate::Left), Kokkos::LayoutLeft,
+                         Kokkos::LayoutRight>;
+  using SrcLayout =
+      Kokkos::Experimental::LayoutTiled<OuterP, InnerP, N0, N1, N2, N3, N4, N5,
+                                        N6, N7, true>;
+
+  return Kokkos::View<T[N0][N1][N2][N3][N4][N5][N6][N7], array_layout, P...>(
+      src, SrcLayout(), i_tile0, i_tile1, i_tile2, i_tile3, i_tile4, i_tile5,
+      i_tile6, i_tile7);
+}
+
+} /* namespace Kokkos */
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_EXPERIMENTAL_VIEWLAYOUTTILE_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewMapping.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewMapping.hpp
new file mode 100644 (file)
index 0000000..7382316
--- /dev/null
@@ -0,0 +1,4140 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_EXPERIMENTAL_VIEW_MAPPING_HPP
+#define KOKKOS_EXPERIMENTAL_VIEW_MAPPING_HPP
+
+#include <type_traits>
+#include <initializer_list>
+
+#include <Kokkos_Core_fwd.hpp>
+#include <Kokkos_DetectionIdiom.hpp>
+#include <Kokkos_Pair.hpp>
+#include <Kokkos_Layout.hpp>
+#include <Kokkos_Extents.hpp>
+#include <impl/Kokkos_Error.hpp>
+#include <impl/Kokkos_Traits.hpp>
+#include <impl/Kokkos_ViewTracker.hpp>
+#include <impl/Kokkos_ViewCtor.hpp>
+#include <impl/Kokkos_Atomic_View.hpp>
+#include <impl/Kokkos_Tools.hpp>
+#include <impl/Kokkos_StringManipulation.hpp>
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <unsigned I, size_t... Args>
+struct variadic_size_t {
+  enum : size_t { value = KOKKOS_INVALID_INDEX };
+};
+
+template <size_t Val, size_t... Args>
+struct variadic_size_t<0, Val, Args...> {
+  enum : size_t { value = Val };
+};
+
+template <unsigned I, size_t Val, size_t... Args>
+struct variadic_size_t<I, Val, Args...> {
+  enum : size_t { value = variadic_size_t<I - 1, Args...>::value };
+};
+
+template <size_t... Args>
+struct rank_dynamic;
+
+template <>
+struct rank_dynamic<> {
+  enum : unsigned { value = 0 };
+};
+
+template <size_t Val, size_t... Args>
+struct rank_dynamic<Val, Args...> {
+  enum : unsigned { value = (Val == 0 ? 1 : 0) + rank_dynamic<Args...>::value };
+};
+
+#define KOKKOS_IMPL_VIEW_DIMENSION(R)                                       \
+  template <size_t V, unsigned>                                             \
+  struct ViewDimension##R {                                                 \
+    static constexpr size_t ArgN##R = (V != KOKKOS_INVALID_INDEX ? V : 1);  \
+    static constexpr size_t N##R    = (V != KOKKOS_INVALID_INDEX ? V : 1);  \
+    KOKKOS_INLINE_FUNCTION explicit ViewDimension##R(size_t) {}             \
+    ViewDimension##R()                        = default;                    \
+    ViewDimension##R(const ViewDimension##R&) = default;                    \
+    ViewDimension##R& operator=(const ViewDimension##R&) = default;         \
+  };                                                                        \
+  template <size_t V, unsigned RD>                                          \
+  constexpr size_t ViewDimension##R<V, RD>::ArgN##R;                        \
+  template <size_t V, unsigned RD>                                          \
+  constexpr size_t ViewDimension##R<V, RD>::N##R;                           \
+  template <unsigned RD>                                                    \
+  struct ViewDimension##R<0u, RD> {                                         \
+    static constexpr size_t ArgN##R = 0;                                    \
+    std::conditional_t<(RD < 3), size_t, unsigned> N##R;                    \
+    ViewDimension##R()                        = default;                    \
+    ViewDimension##R(const ViewDimension##R&) = default;                    \
+    ViewDimension##R& operator=(const ViewDimension##R&) = default;         \
+    KOKKOS_INLINE_FUNCTION explicit ViewDimension##R(size_t V) : N##R(V) {} \
+  };                                                                        \
+  template <unsigned RD>                                                    \
+  constexpr size_t ViewDimension##R<0u, RD>::ArgN##R;
+
+KOKKOS_IMPL_VIEW_DIMENSION(0)
+KOKKOS_IMPL_VIEW_DIMENSION(1)
+KOKKOS_IMPL_VIEW_DIMENSION(2)
+KOKKOS_IMPL_VIEW_DIMENSION(3)
+KOKKOS_IMPL_VIEW_DIMENSION(4)
+KOKKOS_IMPL_VIEW_DIMENSION(5)
+KOKKOS_IMPL_VIEW_DIMENSION(6)
+KOKKOS_IMPL_VIEW_DIMENSION(7)
+
+#undef KOKKOS_IMPL_VIEW_DIMENSION
+
+// MSVC does not perform empty base class optimization by default.
+// Per the standard it is required for standard-layout types.
+template <size_t... Vals>
+struct KOKKOS_IMPL_ENFORCE_EMPTY_BASE_OPTIMIZATION ViewDimension
+    : public ViewDimension0<variadic_size_t<0u, Vals...>::value,
+                            rank_dynamic<Vals...>::value>,
+      public ViewDimension1<variadic_size_t<1u, Vals...>::value,
+                            rank_dynamic<Vals...>::value>,
+      public ViewDimension2<variadic_size_t<2u, Vals...>::value,
+                            rank_dynamic<Vals...>::value>,
+      public ViewDimension3<variadic_size_t<3u, Vals...>::value,
+                            rank_dynamic<Vals...>::value>,
+      public ViewDimension4<variadic_size_t<4u, Vals...>::value,
+                            rank_dynamic<Vals...>::value>,
+      public ViewDimension5<variadic_size_t<5u, Vals...>::value,
+                            rank_dynamic<Vals...>::value>,
+      public ViewDimension6<variadic_size_t<6u, Vals...>::value,
+                            rank_dynamic<Vals...>::value>,
+      public ViewDimension7<variadic_size_t<7u, Vals...>::value,
+                            rank_dynamic<Vals...>::value> {
+  using D0 = ViewDimension0<variadic_size_t<0U, Vals...>::value,
+                            rank_dynamic<Vals...>::value>;
+  using D1 = ViewDimension1<variadic_size_t<1U, Vals...>::value,
+                            rank_dynamic<Vals...>::value>;
+  using D2 = ViewDimension2<variadic_size_t<2U, Vals...>::value,
+                            rank_dynamic<Vals...>::value>;
+  using D3 = ViewDimension3<variadic_size_t<3U, Vals...>::value,
+                            rank_dynamic<Vals...>::value>;
+  using D4 = ViewDimension4<variadic_size_t<4U, Vals...>::value,
+                            rank_dynamic<Vals...>::value>;
+  using D5 = ViewDimension5<variadic_size_t<5U, Vals...>::value,
+                            rank_dynamic<Vals...>::value>;
+  using D6 = ViewDimension6<variadic_size_t<6U, Vals...>::value,
+                            rank_dynamic<Vals...>::value>;
+  using D7 = ViewDimension7<variadic_size_t<7U, Vals...>::value,
+                            rank_dynamic<Vals...>::value>;
+
+  using D0::ArgN0;
+  using D1::ArgN1;
+  using D2::ArgN2;
+  using D3::ArgN3;
+  using D4::ArgN4;
+  using D5::ArgN5;
+  using D6::ArgN6;
+  using D7::ArgN7;
+
+  using D0::N0;
+  using D1::N1;
+  using D2::N2;
+  using D3::N3;
+  using D4::N4;
+  using D5::N5;
+  using D6::N6;
+  using D7::N7;
+
+  enum : unsigned { rank = sizeof...(Vals) };
+  enum : unsigned { rank_dynamic = Impl::rank_dynamic<Vals...>::value };
+
+  ViewDimension()                     = default;
+  ViewDimension(const ViewDimension&) = default;
+  ViewDimension& operator=(const ViewDimension&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ViewDimension(size_t n0, size_t n1, size_t n2, size_t n3, size_t n4,
+                          size_t n5, size_t n6, size_t n7)
+      : D0(n0 == KOKKOS_INVALID_INDEX ? 1 : n0),
+        D1(n1 == KOKKOS_INVALID_INDEX ? 1 : n1),
+        D2(n2 == KOKKOS_INVALID_INDEX ? 1 : n2),
+        D3(n3 == KOKKOS_INVALID_INDEX ? 1 : n3),
+        D4(n4 == KOKKOS_INVALID_INDEX ? 1 : n4),
+        D5(n5 == KOKKOS_INVALID_INDEX ? 1 : n5),
+        D6(n6 == KOKKOS_INVALID_INDEX ? 1 : n6),
+        D7(n7 == KOKKOS_INVALID_INDEX ? 1 : n7) {}
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr size_t extent(const unsigned r) const noexcept {
+    return r == 0
+               ? N0
+               : (r == 1
+                      ? N1
+                      : (r == 2
+                             ? N2
+                             : (r == 3
+                                    ? N3
+                                    : (r == 4
+                                           ? N4
+                                           : (r == 5
+                                                  ? N5
+                                                  : (r == 6
+                                                         ? N6
+                                                         : (r == 7 ? N7
+                                                                   : 0)))))));
+  }
+
+  static KOKKOS_INLINE_FUNCTION constexpr size_t static_extent(
+      const unsigned r) noexcept {
+    return r == 0
+               ? ArgN0
+               : (r == 1
+                      ? ArgN1
+                      : (r == 2
+                             ? ArgN2
+                             : (r == 3
+                                    ? ArgN3
+                                    : (r == 4
+                                           ? ArgN4
+                                           : (r == 5
+                                                  ? ArgN5
+                                                  : (r == 6
+                                                         ? ArgN6
+                                                         : (r == 7 ? ArgN7
+                                                                   : 0)))))));
+  }
+
+  template <size_t N>
+  struct prepend {
+    using type = ViewDimension<N, Vals...>;
+  };
+
+  template <size_t N>
+  struct append {
+    using type = ViewDimension<Vals..., N>;
+  };
+};
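+
+// Compile-time examples of the encoding above: a 0 entry marks a dynamic
+// extent (stored at runtime), a nonzero entry a static extent, and
+// rank_dynamic counts the dynamic entries.
+static_assert(ViewDimension<0, 0, 5>::rank == 3,
+              "example: three extents give rank 3");
+static_assert(ViewDimension<0, 0, 5>::rank_dynamic == 2,
+              "example: two extents are set at runtime");
+static_assert(ViewDimension<0, 0, 5>::ArgN2 == 5,
+              "example: the static extent is recorded in ArgN2");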
+
+template <class A, class B>
+struct ViewDimensionJoin;
+
+template <size_t... A, size_t... B>
+struct ViewDimensionJoin<ViewDimension<A...>, ViewDimension<B...>> {
+  using type = ViewDimension<A..., B...>;
+};
+
+//----------------------------------------------------------------------------
+
+template <class DstDim, class SrcDim>
+struct ViewDimensionAssignable;
+
+template <size_t... DstArgs, size_t... SrcArgs>
+struct ViewDimensionAssignable<ViewDimension<DstArgs...>,
+                               ViewDimension<SrcArgs...>> {
+  using dst = ViewDimension<DstArgs...>;
+  using src = ViewDimension<SrcArgs...>;
+
+  enum {
+    value = unsigned(dst::rank) == unsigned(src::rank) &&
+            (
+                // Compile time check that potential static dimensions match
+                ((1 > dst::rank_dynamic && 1 > src::rank_dynamic)
+                     ? (size_t(dst::ArgN0) == size_t(src::ArgN0))
+                     : true) &&
+                ((2 > dst::rank_dynamic && 2 > src::rank_dynamic)
+                     ? (size_t(dst::ArgN1) == size_t(src::ArgN1))
+                     : true) &&
+                ((3 > dst::rank_dynamic && 3 > src::rank_dynamic)
+                     ? (size_t(dst::ArgN2) == size_t(src::ArgN2))
+                     : true) &&
+                ((4 > dst::rank_dynamic && 4 > src::rank_dynamic)
+                     ? (size_t(dst::ArgN3) == size_t(src::ArgN3))
+                     : true) &&
+                ((5 > dst::rank_dynamic && 5 > src::rank_dynamic)
+                     ? (size_t(dst::ArgN4) == size_t(src::ArgN4))
+                     : true) &&
+                ((6 > dst::rank_dynamic && 6 > src::rank_dynamic)
+                     ? (size_t(dst::ArgN5) == size_t(src::ArgN5))
+                     : true) &&
+                ((7 > dst::rank_dynamic && 7 > src::rank_dynamic)
+                     ? (size_t(dst::ArgN6) == size_t(src::ArgN6))
+                     : true) &&
+                ((8 > dst::rank_dynamic && 8 > src::rank_dynamic)
+                     ? (size_t(dst::ArgN7) == size_t(src::ArgN7))
+                     : true))
+  };
+};
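+
+// Compile-time examples of the assignability rule above: ranks must match,
+// and wherever both sides are static the extents must agree; a dynamic
+// destination extent accepts any source extent.
+static_assert(ViewDimensionAssignable<ViewDimension<0, 4>,
+                                      ViewDimension<0, 4>>::value,
+              "example: matching static extents are assignable");
+static_assert(ViewDimensionAssignable<ViewDimension<0, 0>,
+                                      ViewDimension<0, 4>>::value,
+              "example: a dynamic destination accepts a static source");
+static_assert(!ViewDimensionAssignable<ViewDimension<0, 3>,
+                                       ViewDimension<0, 4>>::value,
+              "example: mismatched static extents are rejected");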
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+struct ALL_t {
+  KOKKOS_INLINE_FUNCTION
+  constexpr const ALL_t& operator()() const { return *this; }
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr bool operator==(const ALL_t&) const { return true; }
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+namespace Kokkos {
+namespace Impl {
+
+template <class T>
+struct is_integral_extent_type {
+  enum : bool { value = std::is_same<T, Kokkos::Impl::ALL_t>::value ? 1 : 0 };
+};
+
+template <class iType>
+struct is_integral_extent_type<std::pair<iType, iType>> {
+  enum : bool { value = std::is_integral<iType>::value ? 1 : 0 };
+};
+
+template <class iType>
+struct is_integral_extent_type<Kokkos::pair<iType, iType>> {
+  enum : bool { value = std::is_integral<iType>::value ? 1 : 0 };
+};
+
+// Assuming '2 == initializer_list<iType>::size()'
+template <class iType>
+struct is_integral_extent_type<std::initializer_list<iType>> {
+  enum : bool { value = std::is_integral<iType>::value ? 1 : 0 };
+};
+
+template <unsigned I, class... Args>
+struct is_integral_extent {
+  // get_type is void when sizeof...(Args) <= I
+  using type = std::remove_cv_t<std::remove_reference_t<
+      typename Kokkos::Impl::get_type<I, Args...>::type>>;
+
+  enum : bool { value = is_integral_extent_type<type>::value };
+
+  static_assert(value || std::is_integral<type>::value ||
+                    std::is_void<type>::value,
+                "subview argument must be either integral or integral extent");
+};
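+
+// Compile-time examples of the classification above: Kokkos::ALL and index
+// pairs are "integral extents" (the dimension survives in the subview),
+// while a plain integral index selects one entry and drops the dimension.
+static_assert(is_integral_extent_type<Kokkos::Impl::ALL_t>::value,
+              "example: ALL keeps the dimension");
+static_assert(is_integral_extent_type<Kokkos::pair<int, int>>::value,
+              "example: an index range keeps the dimension");
+static_assert(!is_integral_extent_type<int>::value,
+              "example: a single index drops the dimension");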
+
+// Rules for subview arguments and layouts matching
+
+template <class LayoutDest, class LayoutSrc, int RankDest, int RankSrc,
+          int CurrentArg, class... SubViewArgs>
+struct SubviewLegalArgsCompileTime;
+
+// Rules which allow LayoutLeft to LayoutLeft assignment
+
+template <int RankDest, int RankSrc, int CurrentArg, class Arg,
+          class... SubViewArgs>
+struct SubviewLegalArgsCompileTime<Kokkos::LayoutLeft, Kokkos::LayoutLeft,
+                                   RankDest, RankSrc, CurrentArg, Arg,
+                                   SubViewArgs...> {
+  enum {
+    value = (((CurrentArg == RankDest - 1) &&
+              (Kokkos::Impl::is_integral_extent_type<Arg>::value)) ||
+             ((CurrentArg >= RankDest) && (std::is_integral<Arg>::value)) ||
+             ((CurrentArg < RankDest) &&
+              (std::is_same<Arg, Kokkos::Impl::ALL_t>::value)) ||
+             ((CurrentArg == 0) &&
+              (Kokkos::Impl::is_integral_extent_type<Arg>::value))) &&
+            (SubviewLegalArgsCompileTime<Kokkos::LayoutLeft, Kokkos::LayoutLeft,
+                                         RankDest, RankSrc, CurrentArg + 1,
+                                         SubViewArgs...>::value)
+  };
+};
+
+template <int RankDest, int RankSrc, int CurrentArg, class Arg>
+struct SubviewLegalArgsCompileTime<Kokkos::LayoutLeft, Kokkos::LayoutLeft,
+                                   RankDest, RankSrc, CurrentArg, Arg> {
+  enum {
+    value = ((CurrentArg == RankDest - 1) || (std::is_integral<Arg>::value)) &&
+            (CurrentArg == RankSrc - 1)
+  };
+};
+
+// Rules which allow LayoutRight to LayoutRight assignment
+
+template <int RankDest, int RankSrc, int CurrentArg, class Arg,
+          class... SubViewArgs>
+struct SubviewLegalArgsCompileTime<Kokkos::LayoutRight, Kokkos::LayoutRight,
+                                   RankDest, RankSrc, CurrentArg, Arg,
+                                   SubViewArgs...> {
+  enum {
+    value = (((CurrentArg == RankSrc - RankDest) &&
+              (Kokkos::Impl::is_integral_extent_type<Arg>::value)) ||
+             ((CurrentArg < RankSrc - RankDest) &&
+              (std::is_integral<Arg>::value)) ||
+             ((CurrentArg >= RankSrc - RankDest) &&
+              (std::is_same<Arg, Kokkos::Impl::ALL_t>::value))) &&
+            (SubviewLegalArgsCompileTime<Kokkos::LayoutRight,
+                                         Kokkos::LayoutRight, RankDest, RankSrc,
+                                         CurrentArg + 1, SubViewArgs...>::value)
+  };
+};
+
+template <int RankDest, int RankSrc, int CurrentArg, class Arg>
+struct SubviewLegalArgsCompileTime<Kokkos::LayoutRight, Kokkos::LayoutRight,
+                                   RankDest, RankSrc, CurrentArg, Arg> {
+  enum {
+    value = ((CurrentArg == RankSrc - 1) &&
+             (std::is_same<Arg, Kokkos::Impl::ALL_t>::value))
+  };
+};
+
+// Rules which allow assignment to LayoutStride
+
+template <int RankDest, int RankSrc, int CurrentArg, class... SubViewArgs>
+struct SubviewLegalArgsCompileTime<Kokkos::LayoutStride, Kokkos::LayoutLeft,
+                                   RankDest, RankSrc, CurrentArg,
+                                   SubViewArgs...> {
+  enum : bool { value = true };
+};
+
+template <int RankDest, int RankSrc, int CurrentArg, class... SubViewArgs>
+struct SubviewLegalArgsCompileTime<Kokkos::LayoutStride, Kokkos::LayoutRight,
+                                   RankDest, RankSrc, CurrentArg,
+                                   SubViewArgs...> {
+  enum : bool { value = true };
+};
+
+template <int RankDest, int RankSrc, int CurrentArg, class... SubViewArgs>
+struct SubviewLegalArgsCompileTime<Kokkos::LayoutStride, Kokkos::LayoutStride,
+                                   RankDest, RankSrc, CurrentArg,
+                                   SubViewArgs...> {
+  enum : bool { value = true };
+};
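+
+// Compile-time examples of the rules above: a rank-1 LayoutLeft subview of a
+// rank-2 LayoutLeft view stays LayoutLeft only when the rank-preserving
+// argument comes first; any argument pattern may be taken as LayoutStride.
+static_assert(
+    SubviewLegalArgsCompileTime<Kokkos::LayoutLeft, Kokkos::LayoutLeft, 1, 2,
+                                0, Kokkos::Impl::ALL_t, int>::value,
+    "example: v(ALL, j) of a LayoutLeft view is LayoutLeft");
+static_assert(
+    !SubviewLegalArgsCompileTime<Kokkos::LayoutLeft, Kokkos::LayoutLeft, 1, 2,
+                                 0, int, Kokkos::Impl::ALL_t>::value,
+    "example: v(i, ALL) of a LayoutLeft view is not LayoutLeft");
+static_assert(
+    SubviewLegalArgsCompileTime<Kokkos::LayoutStride, Kokkos::LayoutLeft, 1, 2,
+                                0, int, Kokkos::Impl::ALL_t>::value,
+    "example: any subview of a LayoutLeft view can be LayoutStride");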
+
+template <unsigned DomainRank, unsigned RangeRank>
+struct SubviewExtents {
+ private:
+  // Cannot declare zero-length arrays
+  // '+' is used to silence GCC 7.2.0 -Wduplicated-branches warning when
+  // RangeRank=1
+  enum { InternalRangeRank = RangeRank ? RangeRank : +1u };
+
+  size_t m_begin[DomainRank];
+  size_t m_length[InternalRangeRank];
+  unsigned m_index[InternalRangeRank];
+
+  template <size_t... DimArgs>
+  KOKKOS_FORCEINLINE_FUNCTION bool set(unsigned, unsigned,
+                                       const ViewDimension<DimArgs...>&) {
+    return true;
+  }
+
+  template <class T, size_t... DimArgs, class... Args>
+  KOKKOS_FORCEINLINE_FUNCTION bool set(unsigned domain_rank,
+                                       unsigned range_rank,
+                                       const ViewDimension<DimArgs...>& dim,
+                                       const T& val, Args... args) {
+    const size_t v = static_cast<size_t>(val);
+
+    m_begin[domain_rank] = v;
+
+    return set(domain_rank + 1, range_rank, dim, args...)
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+           && (v < dim.extent(domain_rank))
+#endif
+        ;
+  }
+
+  // ALL_t
+  template <size_t... DimArgs, class... Args>
+  KOKKOS_FORCEINLINE_FUNCTION bool set(unsigned domain_rank,
+                                       unsigned range_rank,
+                                       const ViewDimension<DimArgs...>& dim,
+                                       const Kokkos::Impl::ALL_t,
+                                       Args... args) {
+    m_begin[domain_rank] = 0;
+    m_length[range_rank] = dim.extent(domain_rank);
+    m_index[range_rank]  = domain_rank;
+
+    return set(domain_rank + 1, range_rank + 1, dim, args...);
+  }
+
+  // std::pair range
+  template <class T, size_t... DimArgs, class... Args>
+  KOKKOS_FORCEINLINE_FUNCTION bool set(unsigned domain_rank,
+                                       unsigned range_rank,
+                                       const ViewDimension<DimArgs...>& dim,
+                                       const std::pair<T, T>& val,
+                                       Args... args) {
+    const size_t b = static_cast<size_t>(val.first);
+    const size_t e = static_cast<size_t>(val.second);
+
+    m_begin[domain_rank] = b;
+    m_length[range_rank] = e - b;
+    m_index[range_rank]  = domain_rank;
+
+    return set(domain_rank + 1, range_rank + 1, dim, args...)
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+           && (e <= b + dim.extent(domain_rank))
+#endif
+        ;
+  }
+
+  // Kokkos::pair range
+  template <class T, size_t... DimArgs, class... Args>
+  KOKKOS_FORCEINLINE_FUNCTION bool set(unsigned domain_rank,
+                                       unsigned range_rank,
+                                       const ViewDimension<DimArgs...>& dim,
+                                       const Kokkos::pair<T, T>& val,
+                                       Args... args) {
+    const size_t b = static_cast<size_t>(val.first);
+    const size_t e = static_cast<size_t>(val.second);
+
+    m_begin[domain_rank] = b;
+    m_length[range_rank] = e - b;
+    m_index[range_rank]  = domain_rank;
+
+    return set(domain_rank + 1, range_rank + 1, dim, args...)
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+           && (e <= b + dim.extent(domain_rank))
+#endif
+        ;
+  }
+
+  // { begin , end } range
+  template <class T, size_t... DimArgs, class... Args>
+  KOKKOS_FORCEINLINE_FUNCTION bool set(unsigned domain_rank,
+                                       unsigned range_rank,
+                                       const ViewDimension<DimArgs...>& dim,
+                                       const std::initializer_list<T>& val,
+                                       Args... args) {
+    const size_t b = static_cast<size_t>(val.begin()[0]);
+    const size_t e = static_cast<size_t>(val.begin()[1]);
+
+    m_begin[domain_rank] = b;
+    m_length[range_rank] = e - b;
+    m_index[range_rank]  = domain_rank;
+
+    return set(domain_rank + 1, range_rank + 1, dim, args...)
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+           && (val.size() == 2) && (e <= b + dim.extent(domain_rank))
+#endif
+        ;
+  }
+
+  //------------------------------
+
+#if defined(KOKKOS_ENABLE_DEBUG_BOUNDS_CHECK)
+
+  template <size_t... DimArgs>
+  void error(char*, int, unsigned, unsigned,
+             const ViewDimension<DimArgs...>&) const {}
+
+  template <class T, size_t... DimArgs, class... Args>
+  void error(char* buf, int buf_len, unsigned domain_rank, unsigned range_rank,
+             const ViewDimension<DimArgs...>& dim, const T& val,
+             Args... args) const {
+    const int n = std::min(
+        buf_len,
+        snprintf(buf, buf_len, " %lu < %lu %c", static_cast<unsigned long>(val),
+                 static_cast<unsigned long>(dim.extent(domain_rank)),
+                 int(sizeof...(Args) ? ',' : ')')));
+
+    error(buf + n, buf_len - n, domain_rank + 1, range_rank, dim, args...);
+  }
+
+  // ALL_t
+  template <size_t... DimArgs, class... Args>
+  void error(char* buf, int buf_len, unsigned domain_rank, unsigned range_rank,
+             const ViewDimension<DimArgs...>& dim, const Kokkos::Impl::ALL_t,
+             Args... args) const {
+    const int n = std::min(buf_len, snprintf(buf, buf_len, " Kokkos::ALL %c",
+                                             int(sizeof...(Args) ? ',' : ')')));
+
+    error(buf + n, buf_len - n, domain_rank + 1, range_rank + 1, dim, args...);
+  }
+
+  // std::pair range
+  template <class T, size_t... DimArgs, class... Args>
+  void error(char* buf, int buf_len, unsigned domain_rank, unsigned range_rank,
+             const ViewDimension<DimArgs...>& dim, const std::pair<T, T>& val,
+             Args... args) const {
+    // d <= e - b
+    const int n = std::min(
+        buf_len, snprintf(buf, buf_len, " %lu <= %lu - %lu %c",
+                          static_cast<unsigned long>(dim.extent(domain_rank)),
+                          static_cast<unsigned long>(val.second),
+                          static_cast<unsigned long>(val.first),
+                          int(sizeof...(Args) ? ',' : ')')));
+
+    error(buf + n, buf_len - n, domain_rank + 1, range_rank + 1, dim, args...);
+  }
+
+  // Kokkos::pair range
+  template <class T, size_t... DimArgs, class... Args>
+  void error(char* buf, int buf_len, unsigned domain_rank, unsigned range_rank,
+             const ViewDimension<DimArgs...>& dim,
+             const Kokkos::pair<T, T>& val, Args... args) const {
+    // d <= e - b
+    const int n = std::min(
+        buf_len, snprintf(buf, buf_len, " %lu <= %lu - %lu %c",
+                          static_cast<unsigned long>(dim.extent(domain_rank)),
+                          static_cast<unsigned long>(val.second),
+                          static_cast<unsigned long>(val.first),
+                          int(sizeof...(Args) ? ',' : ')')));
+
+    error(buf + n, buf_len - n, domain_rank + 1, range_rank + 1, dim, args...);
+  }
+
+  // { begin , end } range
+  template <class T, size_t... DimArgs, class... Args>
+  void error(char* buf, int buf_len, unsigned domain_rank, unsigned range_rank,
+             const ViewDimension<DimArgs...>& dim,
+             const std::initializer_list<T>& val, Args... args) const {
+    // d <= e - b
+    int n = 0;
+    if (val.size() == 2) {
+      n = std::min(buf_len,
+                   snprintf(buf, buf_len, " %lu <= %lu - %lu %c",
+                            static_cast<unsigned long>(dim.extent(domain_rank)),
+                            static_cast<unsigned long>(val.begin()[1]),
+                            static_cast<unsigned long>(val.begin()[0]),
+                            int(sizeof...(Args) ? ',' : ')')));
+    } else {
+      n = std::min(buf_len, snprintf(buf, buf_len, " { ... }.size() == %u %c",
+                                     unsigned(val.size()),
+                                     int(sizeof...(Args) ? ',' : ')')));
+    }
+
+    error(buf + n, buf_len - n, domain_rank + 1, range_rank + 1, dim, args...);
+  }
+
+  template <size_t... DimArgs, class... Args>
+  KOKKOS_FORCEINLINE_FUNCTION void error(const ViewDimension<DimArgs...>& dim,
+                                         Args... args) const {
+    KOKKOS_IF_ON_HOST(
+        (enum {LEN = 1024}; char buffer[LEN];
+
+         const int n = snprintf(buffer, LEN, "Kokkos::subview bounds error (");
+         error(buffer + n, LEN - n, 0, 0, dim, args...);
+
+         Kokkos::Impl::throw_runtime_exception(std::string(buffer));))
+
+    KOKKOS_IF_ON_DEVICE(((void)dim;
+                         Kokkos::abort("Kokkos::subview bounds error");
+                         [](Args...) {}(args...);))
+  }
+
+#else
+
+  template <size_t... DimArgs, class... Args>
+  KOKKOS_FORCEINLINE_FUNCTION void error(const ViewDimension<DimArgs...>&,
+                                         Args...) const {}
+
+#endif
+
+ public:
+  template <size_t... DimArgs, class... Args>
+  KOKKOS_INLINE_FUNCTION SubviewExtents(const ViewDimension<DimArgs...>& dim,
+                                        Args... args) {
+    static_assert(DomainRank == sizeof...(DimArgs), "");
+    static_assert(DomainRank == sizeof...(Args), "");
+
+    // Verifies that RangeRank equals the number of integral-extent
+    // arguments; each of the (up to 8) arguments must be an integral
+    // index, an integral extent, or absent.
+    static_assert(
+        RangeRank == unsigned(is_integral_extent<0, Args...>::value) +
+                         unsigned(is_integral_extent<1, Args...>::value) +
+                         unsigned(is_integral_extent<2, Args...>::value) +
+                         unsigned(is_integral_extent<3, Args...>::value) +
+                         unsigned(is_integral_extent<4, Args...>::value) +
+                         unsigned(is_integral_extent<5, Args...>::value) +
+                         unsigned(is_integral_extent<6, Args...>::value) +
+                         unsigned(is_integral_extent<7, Args...>::value),
+        "");
+
+    if (RangeRank == 0) {
+      m_length[0] = 0;
+      m_index[0]  = ~0u;
+    }
+
+    if (!set(0, 0, dim, args...)) error(dim, args...);
+  }
+
+  template <typename iType>
+  KOKKOS_FORCEINLINE_FUNCTION constexpr size_t domain_offset(
+      const iType i) const {
+    return unsigned(i) < DomainRank ? m_begin[i] : 0;
+  }
+
+  template <typename iType>
+  KOKKOS_FORCEINLINE_FUNCTION constexpr size_t range_extent(
+      const iType i) const {
+    return unsigned(i) < InternalRangeRank ? m_length[i] : 0;
+  }
+
+  template <typename iType>
+  KOKKOS_FORCEINLINE_FUNCTION constexpr unsigned range_index(
+      const iType i) const {
+    return unsigned(i) < InternalRangeRank ? m_index[i] : ~0u;
+  }
+};
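+
+// An illustrative sketch (values only, not compiled code) of the bookkeeping
+// above: for a rank-2 source, the arguments (pair(2, 5), 7) give
+// DomainRank == 2, RangeRank == 1, and a SubviewExtents with
+//   domain_offset(0) == 2, domain_offset(1) == 7  (begin of each argument)
+//   range_extent(0)  == 3                         (5 - 2 surviving entries)
+//   range_index(0)   == 0   (the surviving extent maps to source dimension 0)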
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+/** \brief  Given a value type and dimension generate the View data type */
+template <class T, class Dim>
+struct ViewDataType;
+
+template <class T>
+struct ViewDataType<T, ViewDimension<>> {
+  using type = T;
+};
+
+template <class T, size_t... Args>
+struct ViewDataType<T, ViewDimension<0, Args...>> {
+  using type = typename ViewDataType<T*, ViewDimension<Args...>>::type;
+};
+
+template <class T, size_t N, size_t... Args>
+struct ViewDataType<T, ViewDimension<N, Args...>> {
+  using type = typename ViewDataType<T, ViewDimension<Args...>>::type[N];
+};
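+
+// A compile-time example of the generation above: dynamic ranks become '*'
+// and static ranks become '[N]' in the View data type spelling.
+static_assert(
+    std::is_same<ViewDataType<double, ViewDimension<0, 0, 3>>::type,
+                 double** [3]>::value,
+    "example: two dynamic extents and a static extent of 3");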
+
+/**\brief  Analysis of View data type.
+ *
+ *  Data type conforms to one of the following patterns :
+ *    {const} value_type [][#][#][#]
+ *    {const} value_type ***[#][#][#]
+ *  Where the sum of counts of '*' and '[#]' is at most ten.
+ *
+ *  Provide alias for ViewDimension<...> and value_type.
+ */
+template <class T>
+struct ViewArrayAnalysis {
+  using value_type           = T;
+  using const_value_type     = std::add_const_t<T>;
+  using non_const_value_type = std::remove_const_t<T>;
+  using static_dimension     = ViewDimension<>;
+  using dynamic_dimension    = ViewDimension<>;
+  using dimension            = ViewDimension<>;
+};
+
+template <class T, size_t N>
+struct ViewArrayAnalysis<T[N]> {
+ private:
+  using nested = ViewArrayAnalysis<T>;
+
+ public:
+  using value_type           = typename nested::value_type;
+  using const_value_type     = typename nested::const_value_type;
+  using non_const_value_type = typename nested::non_const_value_type;
+
+  using static_dimension =
+      typename nested::static_dimension::template prepend<N>::type;
+
+  using dynamic_dimension = typename nested::dynamic_dimension;
+
+  using dimension =
+      typename ViewDimensionJoin<dynamic_dimension, static_dimension>::type;
+};
+
+template <class T>
+struct ViewArrayAnalysis<T[]> {
+ private:
+  using nested           = ViewArrayAnalysis<T>;
+  using nested_dimension = typename nested::dimension;
+
+ public:
+  using value_type           = typename nested::value_type;
+  using const_value_type     = typename nested::const_value_type;
+  using non_const_value_type = typename nested::non_const_value_type;
+
+  using dynamic_dimension =
+      typename nested::dynamic_dimension::template prepend<0>::type;
+
+  using static_dimension = typename nested::static_dimension;
+
+  using dimension =
+      typename ViewDimensionJoin<dynamic_dimension, static_dimension>::type;
+};
+
+template <class T>
+struct ViewArrayAnalysis<T*> {
+ private:
+  using nested = ViewArrayAnalysis<T>;
+
+ public:
+  using value_type           = typename nested::value_type;
+  using const_value_type     = typename nested::const_value_type;
+  using non_const_value_type = typename nested::non_const_value_type;
+
+  using dynamic_dimension =
+      typename nested::dynamic_dimension::template prepend<0>::type;
+
+  using static_dimension = typename nested::static_dimension;
+
+  using dimension =
+      typename ViewDimensionJoin<dynamic_dimension, static_dimension>::type;
+};
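+
+// Compile-time examples of the analysis above for 'double* [3]' (one
+// dynamic rank followed by a static extent of 3):
+static_assert(std::is_same<ViewArrayAnalysis<double* [3]>::value_type,
+                           double>::value,
+              "example: the scalar value type is recovered");
+static_assert(std::is_same<ViewArrayAnalysis<double* [3]>::dimension,
+                           ViewDimension<0, 3>>::value,
+              "example: dynamic dimensions precede static ones");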
+
+template <class DataType, class ArrayLayout, class ValueType>
+struct ViewDataAnalysis {
+ private:
+  using array_analysis = ViewArrayAnalysis<DataType>;
+
+  // ValueType is an opportunity for partial specialization.
+  // It must match the array analysis when this default template is used.
+  static_assert(
+      std::is_same<ValueType,
+                   typename array_analysis::non_const_value_type>::value,
+      "");
+
+ public:
+  using specialize = void;  // No specialization
+
+  using dimension            = typename array_analysis::dimension;
+  using value_type           = typename array_analysis::value_type;
+  using const_value_type     = typename array_analysis::const_value_type;
+  using non_const_value_type = typename array_analysis::non_const_value_type;
+
+  // Generate analogous multidimensional array specification type.
+  using type       = typename ViewDataType<value_type, dimension>::type;
+  using const_type = typename ViewDataType<const_value_type, dimension>::type;
+  using non_const_type =
+      typename ViewDataType<non_const_value_type, dimension>::type;
+
+  // Generate "flattened" multidimensional array specification type.
+  using scalar_array_type           = type;
+  using const_scalar_array_type     = const_type;
+  using non_const_scalar_array_type = non_const_type;
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <class Dimension, class Layout, class Enable = void>
+struct ViewOffset {
+  using is_mapping_plugin = std::false_type;
+};
+
+//----------------------------------------------------------------------------
+// LayoutLeft AND ( 1 >= rank OR 0 == rank_dynamic ) : no padding / striding
+template <class Dimension>
+struct ViewOffset<
+    Dimension, Kokkos::LayoutLeft,
+    std::enable_if_t<(1 >= Dimension::rank || 0 == Dimension::rank_dynamic)>> {
+  using is_mapping_plugin = std::true_type;
+  using is_regular        = std::true_type;
+
+  using size_type      = size_t;
+  using dimension_type = Dimension;
+  using array_layout   = Kokkos::LayoutLeft;
+
+  dimension_type m_dim;
+
+  //----------------------------------------
+
+  // rank 1
+  template <typename I0>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0) const {
+    return i0;
+  }
+
+  // rank 2
+  template <typename I0, typename I1>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1) const {
+    return i0 + m_dim.N0 * i1;
+  }
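+
+  // Worked example: with N0 == 2 and N1 == 3, element (i0, i1) maps to
+  // i0 + 2 * i1, so (1, 2) lands at flat offset 1 + 2 * 2 == 5; the first
+  // index is the contiguous (fastest-varying) one, as LayoutLeft requires.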
+
+  // rank 3
+  template <typename I0, typename I1, typename I2>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2) const {
+    return i0 + m_dim.N0 * (i1 + m_dim.N1 * i2);
+  }
+
+  // rank 4
+  template <typename I0, typename I1, typename I2, typename I3>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2,
+                                                        I3 const& i3) const {
+    return i0 + m_dim.N0 * (i1 + m_dim.N1 * (i2 + m_dim.N2 * i3));
+  }
+
+  // rank 5
+  template <typename I0, typename I1, typename I2, typename I3, typename I4>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2,
+                                                        I3 const& i3,
+                                                        I4 const& i4) const {
+    return i0 +
+           m_dim.N0 * (i1 + m_dim.N1 * (i2 + m_dim.N2 * (i3 + m_dim.N3 * i4)));
+  }
+
+  // rank 6
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5) const {
+    return i0 +
+           m_dim.N0 *
+               (i1 +
+                m_dim.N1 *
+                    (i2 + m_dim.N2 * (i3 + m_dim.N3 * (i4 + m_dim.N4 * i5))));
+  }
+
+  // rank 7
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5, I6 const& i6) const {
+    return i0 +
+           m_dim.N0 *
+               (i1 + m_dim.N1 *
+                         (i2 + m_dim.N2 *
+                                   (i3 + m_dim.N3 *
+                                             (i4 + m_dim.N4 *
+                                                       (i5 + m_dim.N5 * i6)))));
+  }
+
+  // rank 8
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename I7>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5, I6 const& i6, I7 const& i7) const {
+    return i0 +
+           m_dim.N0 *
+               (i1 +
+                m_dim.N1 *
+                    (i2 + m_dim.N2 *
+                              (i3 + m_dim.N3 *
+                                        (i4 + m_dim.N4 *
+                                                  (i5 + m_dim.N5 *
+                                                            (i6 + m_dim.N6 *
+                                                                      i7))))));
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr array_layout layout() const {
+    constexpr auto r = dimension_type::rank;
+    return array_layout((r > 0 ? m_dim.N0 : KOKKOS_INVALID_INDEX),
+                        (r > 1 ? m_dim.N1 : KOKKOS_INVALID_INDEX),
+                        (r > 2 ? m_dim.N2 : KOKKOS_INVALID_INDEX),
+                        (r > 3 ? m_dim.N3 : KOKKOS_INVALID_INDEX),
+                        (r > 4 ? m_dim.N4 : KOKKOS_INVALID_INDEX),
+                        (r > 5 ? m_dim.N5 : KOKKOS_INVALID_INDEX),
+                        (r > 6 ? m_dim.N6 : KOKKOS_INVALID_INDEX),
+                        (r > 7 ? m_dim.N7 : KOKKOS_INVALID_INDEX));
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_0() const {
+    return m_dim.N0;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_1() const {
+    return m_dim.N1;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_2() const {
+    return m_dim.N2;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_3() const {
+    return m_dim.N3;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_4() const {
+    return m_dim.N4;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_5() const {
+    return m_dim.N5;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_6() const {
+    return m_dim.N6;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_7() const {
+    return m_dim.N7;
+  }
+
+  /* Cardinality of the domain index space */
+  KOKKOS_INLINE_FUNCTION
+  constexpr size_type size() const {
+    return size_type(m_dim.N0) * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4 *
+           m_dim.N5 * m_dim.N6 * m_dim.N7;
+  }
+
+  /* Span of the range space */
+  KOKKOS_INLINE_FUNCTION
+  constexpr size_type span() const {
+    return size_type(m_dim.N0) * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4 *
+           m_dim.N5 * m_dim.N6 * m_dim.N7;
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
+    return true;
+  }
+
+  /* Strides of dimensions */
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_0() const { return 1; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_1() const {
+    return m_dim.N0;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_2() const {
+    return size_type(m_dim.N0) * m_dim.N1;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_3() const {
+    return size_type(m_dim.N0) * m_dim.N1 * m_dim.N2;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_4() const {
+    return size_type(m_dim.N0) * m_dim.N1 * m_dim.N2 * m_dim.N3;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_5() const {
+    return size_type(m_dim.N0) * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_6() const {
+    return size_type(m_dim.N0) * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4 *
+           m_dim.N5;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_7() const {
+    return size_type(m_dim.N0) * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4 *
+           m_dim.N5 * m_dim.N6;
+  }
+
+  // The stride array has rank + 1 entries; s[rank] is the total span length.
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+    s[0] = 1;
+    if (0 < dimension_type::rank) {
+      s[1] = m_dim.N0;
+    }
+    if (1 < dimension_type::rank) {
+      s[2] = s[1] * m_dim.N1;
+    }
+    if (2 < dimension_type::rank) {
+      s[3] = s[2] * m_dim.N2;
+    }
+    if (3 < dimension_type::rank) {
+      s[4] = s[3] * m_dim.N3;
+    }
+    if (4 < dimension_type::rank) {
+      s[5] = s[4] * m_dim.N4;
+    }
+    if (5 < dimension_type::rank) {
+      s[6] = s[5] * m_dim.N5;
+    }
+    if (6 < dimension_type::rank) {
+      s[7] = s[6] * m_dim.N6;
+    }
+    if (7 < dimension_type::rank) {
+      s[8] = s[7] * m_dim.N7;
+    }
+  }
+
+  //----------------------------------------
+
+  // MSVC (16.5.5) + CUDA (10.2) did not generate the defaulted functions
+  // correctly and errored out during compilation. The same applies to the
+  // other places where this workaround is used.
+#ifdef KOKKOS_IMPL_WINDOWS_CUDA
+  KOKKOS_FUNCTION ViewOffset() : m_dim(dimension_type()) {}
+  KOKKOS_FUNCTION ViewOffset(const ViewOffset& src) { m_dim = src.m_dim; }
+  KOKKOS_FUNCTION ViewOffset& operator=(const ViewOffset& src) {
+    m_dim = src.m_dim;
+    return *this;
+  }
+#else
+  ViewOffset()                  = default;
+  ViewOffset(const ViewOffset&) = default;
+  ViewOffset& operator=(const ViewOffset&) = default;
+#endif
+
+  template <unsigned TrivialScalarSize>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      std::integral_constant<unsigned, TrivialScalarSize> const&,
+      Kokkos::LayoutLeft const& arg_layout)
+      : m_dim(arg_layout.dimension[0], 0, 0, 0, 0, 0, 0, 0) {}
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutLeft, void>& rhs)
+      : m_dim(rhs.m_dim.N0, rhs.m_dim.N1, rhs.m_dim.N2, rhs.m_dim.N3,
+              rhs.m_dim.N4, rhs.m_dim.N5, rhs.m_dim.N6, rhs.m_dim.N7) {
+    static_assert(int(DimRHS::rank) == int(dimension_type::rank),
+                  "ViewOffset assignment requires equal rank");
+    // Also requires equal static dimensions ...
+  }
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutRight, void>& rhs)
+      : m_dim(rhs.m_dim.N0, 0, 0, 0, 0, 0, 0, 0) {
+    static_assert((DimRHS::rank == 0 && dimension_type::rank == 0) ||
+                      (DimRHS::rank == 1 && dimension_type::rank == 1 &&
+                       dimension_type::rank_dynamic == 1),
+                  "ViewOffset LayoutLeft and LayoutRight are only compatible "
+                  "when rank <= 1");
+  }
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutStride, void>& rhs)
+      : m_dim(rhs.m_dim.N0, 0, 0, 0, 0, 0, 0, 0) {
+    if (rhs.m_stride.S0 != 1) {
+      Kokkos::abort(
+          "Kokkos::Impl::ViewOffset assignment of LayoutLeft from LayoutStride "
+          " requires stride == 1");
+    }
+  }
+
+  //----------------------------------------
+  // Subview construction
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutLeft, void>&,
+      const SubviewExtents<DimRHS::rank, dimension_type::rank>& sub)
+      : m_dim(sub.range_extent(0), 0, 0, 0, 0, 0, 0, 0) {
+    static_assert((0 == dimension_type::rank_dynamic) ||
+                      (1 == dimension_type::rank &&
+                       1 == dimension_type::rank_dynamic && 1 <= DimRHS::rank),
+                  "ViewOffset subview construction requires compatible rank");
+  }
+};
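+
+// Illustrative usage sketch (assumed example, not part of the bundled
+// source): a fully statically sized view such as
+//
+//   Kokkos::View<double[3][4], Kokkos::LayoutLeft> a("a");
+//
+// has rank_dynamic == 0 and therefore uses the specialization above; a(i0, i1)
+// resolves to offset i0 + 3 * i1, so consecutive i0 values are adjacent in
+// memory (column-major ordering, as in Fortran/BLAS).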
+
+//----------------------------------------------------------------------------
+// LayoutLeft AND ( 1 < rank AND 0 < rank_dynamic ) : has padding / striding
+template <class Dimension>
+struct ViewOffset<
+    Dimension, Kokkos::LayoutLeft,
+    std::enable_if_t<(1 < Dimension::rank && 0 < Dimension::rank_dynamic)>> {
+  using is_mapping_plugin = std::true_type;
+  using is_regular        = std::true_type;
+
+  using size_type      = size_t;
+  using dimension_type = Dimension;
+  using array_layout   = Kokkos::LayoutLeft;
+
+  dimension_type m_dim;
+  size_type m_stride;
+
+  //----------------------------------------
+
+  // rank 1
+  template <typename I0>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0) const {
+    return i0;
+  }
+
+  // rank 2
+  template <typename I0, typename I1>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1) const {
+    return i0 + m_stride * i1;
+  }
+
+  // rank 3
+  template <typename I0, typename I1, typename I2>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2) const {
+    return i0 + m_stride * (i1 + m_dim.N1 * i2);
+  }
+
+  // rank 4
+  template <typename I0, typename I1, typename I2, typename I3>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2,
+                                                        I3 const& i3) const {
+    return i0 + m_stride * (i1 + m_dim.N1 * (i2 + m_dim.N2 * i3));
+  }
+
+  // rank 5
+  template <typename I0, typename I1, typename I2, typename I3, typename I4>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2,
+                                                        I3 const& i3,
+                                                        I4 const& i4) const {
+    return i0 +
+           m_stride * (i1 + m_dim.N1 * (i2 + m_dim.N2 * (i3 + m_dim.N3 * i4)));
+  }
+
+  // rank 6
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5) const {
+    return i0 +
+           m_stride *
+               (i1 +
+                m_dim.N1 *
+                    (i2 + m_dim.N2 * (i3 + m_dim.N3 * (i4 + m_dim.N4 * i5))));
+  }
+
+  // rank 7
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5, I6 const& i6) const {
+    return i0 +
+           m_stride *
+               (i1 + m_dim.N1 *
+                         (i2 + m_dim.N2 *
+                                   (i3 + m_dim.N3 *
+                                             (i4 + m_dim.N4 *
+                                                       (i5 + m_dim.N5 * i6)))));
+  }
+
+  // rank 8
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename I7>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5, I6 const& i6, I7 const& i7) const {
+    return i0 +
+           m_stride *
+               (i1 +
+                m_dim.N1 *
+                    (i2 + m_dim.N2 *
+                              (i3 + m_dim.N3 *
+                                        (i4 + m_dim.N4 *
+                                                  (i5 + m_dim.N5 *
+                                                            (i6 + m_dim.N6 *
+                                                                      i7))))));
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr array_layout layout() const {
+    constexpr auto r = dimension_type::rank;
+    return array_layout((r > 0 ? m_dim.N0 : KOKKOS_INVALID_INDEX),
+                        (r > 1 ? m_dim.N1 : KOKKOS_INVALID_INDEX),
+                        (r > 2 ? m_dim.N2 : KOKKOS_INVALID_INDEX),
+                        (r > 3 ? m_dim.N3 : KOKKOS_INVALID_INDEX),
+                        (r > 4 ? m_dim.N4 : KOKKOS_INVALID_INDEX),
+                        (r > 5 ? m_dim.N5 : KOKKOS_INVALID_INDEX),
+                        (r > 6 ? m_dim.N6 : KOKKOS_INVALID_INDEX),
+                        (r > 7 ? m_dim.N7 : KOKKOS_INVALID_INDEX));
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_0() const {
+    return m_dim.N0;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_1() const {
+    return m_dim.N1;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_2() const {
+    return m_dim.N2;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_3() const {
+    return m_dim.N3;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_4() const {
+    return m_dim.N4;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_5() const {
+    return m_dim.N5;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_6() const {
+    return m_dim.N6;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_7() const {
+    return m_dim.N7;
+  }
+
+  /* Cardinality of the domain index space */
+  KOKKOS_INLINE_FUNCTION
+  constexpr size_type size() const {
+    return size_type(m_dim.N0) * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4 *
+           m_dim.N5 * m_dim.N6 * m_dim.N7;
+  }
+
+  /* Span of the range space */
+  KOKKOS_INLINE_FUNCTION
+  constexpr size_type span() const {
+    return (m_dim.N0 > size_type(0) ? m_stride : size_type(0)) * m_dim.N1 *
+           m_dim.N2 * m_dim.N3 * m_dim.N4 * m_dim.N5 * m_dim.N6 * m_dim.N7;
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
+    return m_stride == m_dim.N0;
+  }
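+
+  // Illustration (assumed numbers; actual padding depends on the configured
+  // alignment): if N0 == 1001 doubles are padded to m_stride == 1008, then
+  // size() counts 1001 * N1 * ... elements while span() covers
+  // 1008 * N1 * ... slots, and span_is_contiguous() reports false.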
+
+  /* Strides of dimensions */
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_0() const { return 1; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_1() const {
+    return m_stride;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_2() const {
+    return m_stride * m_dim.N1;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_3() const {
+    return m_stride * m_dim.N1 * m_dim.N2;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_4() const {
+    return m_stride * m_dim.N1 * m_dim.N2 * m_dim.N3;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_5() const {
+    return m_stride * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_6() const {
+    return m_stride * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4 * m_dim.N5;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_7() const {
+    return m_stride * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4 * m_dim.N5 *
+           m_dim.N6;
+  }
+
+  // Stride of each dimension; the entry s[ rank ] holds the total length
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+    s[0] = 1;
+    if (0 < dimension_type::rank) {
+      s[1] = m_stride;
+    }
+    if (1 < dimension_type::rank) {
+      s[2] = s[1] * m_dim.N1;
+    }
+    if (2 < dimension_type::rank) {
+      s[3] = s[2] * m_dim.N2;
+    }
+    if (3 < dimension_type::rank) {
+      s[4] = s[3] * m_dim.N3;
+    }
+    if (4 < dimension_type::rank) {
+      s[5] = s[4] * m_dim.N4;
+    }
+    if (5 < dimension_type::rank) {
+      s[6] = s[5] * m_dim.N5;
+    }
+    if (6 < dimension_type::rank) {
+      s[7] = s[6] * m_dim.N6;
+    }
+    if (7 < dimension_type::rank) {
+      s[8] = s[7] * m_dim.N7;
+    }
+  }
+
+  //----------------------------------------
+
+ private:
+  template <unsigned TrivialScalarSize>
+  struct Padding {
+    enum {
+      div = TrivialScalarSize == 0
+                ? 0
+                : Kokkos::Impl::MEMORY_ALIGNMENT /
+                      (TrivialScalarSize ? TrivialScalarSize : 1)
+    };
+    enum {
+      mod = TrivialScalarSize == 0
+                ? 0
+                : Kokkos::Impl::MEMORY_ALIGNMENT %
+                      (TrivialScalarSize ? TrivialScalarSize : 1)
+    };
+
+    // If memory alignment is a multiple of the trivial scalar size then attempt
+    // to align.
+    enum { align = 0 != TrivialScalarSize && 0 == mod ? div : 0 };
+    enum {
+      div_ok = (div != 0) ? div : 1
+    };  // To avoid modulo by zero in constexpr evaluation
+
+    KOKKOS_INLINE_FUNCTION
+    static constexpr size_t stride(size_t const N) {
+      return ((align != 0) &&
+              ((static_cast<int>(Kokkos::Impl::MEMORY_ALIGNMENT_THRESHOLD) *
+                static_cast<int>(align)) < N) &&
+              ((N % div_ok) != 0))
+                 ? N + align - (N % div_ok)
+                 : N;
+    }
+  };
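+
+  // Worked example (illustrative; div/mod/align depend on the configured
+  // Kokkos::Impl::MEMORY_ALIGNMENT): with 64-byte alignment and an 8-byte
+  // scalar, div == 8, mod == 0, and align == 8. For N large enough to pass
+  // the alignment threshold, stride(1001) rounds up to 1008 (the next
+  // multiple of 8) while stride(1000) stays 1000, already a multiple of 8.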
+
+ public:
+  // MSVC (16.5.5) + CUDA (10.2) did not generate the defaulted functions
+  // correctly and errored out during compilation. The same applies to the
+  // other places where this workaround is used.
+#ifdef KOKKOS_IMPL_WINDOWS_CUDA
+  KOKKOS_FUNCTION ViewOffset() : m_dim(dimension_type()), m_stride(0) {}
+  KOKKOS_FUNCTION ViewOffset(const ViewOffset& src) {
+    m_dim    = src.m_dim;
+    m_stride = src.m_stride;
+  }
+  KOKKOS_FUNCTION ViewOffset& operator=(const ViewOffset& src) {
+    m_dim    = src.m_dim;
+    m_stride = src.m_stride;
+    return *this;
+  }
+#else
+
+  ViewOffset()                  = default;
+  ViewOffset(const ViewOffset&) = default;
+  ViewOffset& operator=(const ViewOffset&) = default;
+#endif
+
+  /* Enable padding for trivial scalar types with non-zero trivial scalar
+     size. */
+  template <unsigned TrivialScalarSize>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      std::integral_constant<unsigned, TrivialScalarSize> const&,
+      Kokkos::LayoutLeft const& arg_layout)
+      : m_dim(arg_layout.dimension[0], arg_layout.dimension[1],
+              arg_layout.dimension[2], arg_layout.dimension[3],
+              arg_layout.dimension[4], arg_layout.dimension[5],
+              arg_layout.dimension[6], arg_layout.dimension[7]),
+        m_stride(Padding<TrivialScalarSize>::stride(arg_layout.dimension[0])) {}
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutLeft, void>& rhs)
+      : m_dim(rhs.m_dim.N0, rhs.m_dim.N1, rhs.m_dim.N2, rhs.m_dim.N3,
+              rhs.m_dim.N4, rhs.m_dim.N5, rhs.m_dim.N6, rhs.m_dim.N7),
+        m_stride(rhs.stride_1()) {
+    static_assert(int(DimRHS::rank) == int(dimension_type::rank),
+                  "ViewOffset assignment requires equal rank");
+    // Also requires equal static dimensions ...
+  }
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutStride, void>& rhs)
+      : m_dim(rhs.m_dim.N0, rhs.m_dim.N1, rhs.m_dim.N2, rhs.m_dim.N3,
+              rhs.m_dim.N4, rhs.m_dim.N5, rhs.m_dim.N6, rhs.m_dim.N7),
+        m_stride(rhs.stride_1()) {
+    if (rhs.m_stride.S0 != 1) {
+      Kokkos::abort(
+          "Kokkos::Impl::ViewOffset assignment of LayoutLeft from LayoutStride "
+          "requires stride == 1");
+    }
+  }
+
+  //----------------------------------------
+  // Subview construction
+  // This subview must have 2 == rank and 2 == rank_dynamic
+  // because it carries only stride #0.
+  // The source dimension #0 must be non-zero for a stride-one leading
+  // dimension, and at most one subsequent dimension can be non-zero.
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutLeft, void>& rhs,
+      const SubviewExtents<DimRHS::rank, dimension_type::rank>& sub)
+      : m_dim(sub.range_extent(0), sub.range_extent(1), sub.range_extent(2),
+              sub.range_extent(3), sub.range_extent(4), sub.range_extent(5),
+              sub.range_extent(6), sub.range_extent(7)),
+        m_stride(
+            (1 == sub.range_index(1)
+                 ? rhs.stride_1()
+                 : (2 == sub.range_index(1)
+                        ? rhs.stride_2()
+                        : (3 == sub.range_index(1)
+                               ? rhs.stride_3()
+                               : (4 == sub.range_index(1)
+                                      ? rhs.stride_4()
+                                      : (5 == sub.range_index(1)
+                                             ? rhs.stride_5()
+                                             : (6 == sub.range_index(1)
+                                                    ? rhs.stride_6()
+                                                    : (7 == sub.range_index(1)
+                                                           ? rhs.stride_7()
+                                                           : 0)))))))) {
+    // static_assert( ( 2 == dimension_type::rank ) &&
+    //               ( 2 == dimension_type::rank_dynamic ) &&
+    //               ( 2 <= DimRHS::rank )
+    //             , "ViewOffset subview construction requires compatible rank"
+    //             );
+  }
+};
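+
+// Sketch of the stride selection above (illustrative): if a rank-3 source is
+// restricted to dimensions 0 and 2 (dimension 1 pinned to a single index),
+// then sub.range_index(1) == 2 and the resulting rank-2 offset reuses
+// rhs.stride_2() as its m_stride, preserving a stride-one leading dimension.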
+
+//----------------------------------------------------------------------------
+// LayoutRight AND ( 1 >= rank OR 0 == rank_dynamic ) : no padding / striding
+template <class Dimension>
+struct ViewOffset<
+    Dimension, Kokkos::LayoutRight,
+    std::enable_if_t<(1 >= Dimension::rank || 0 == Dimension::rank_dynamic)>> {
+  using is_mapping_plugin = std::true_type;
+  using is_regular        = std::true_type;
+
+  using size_type      = size_t;
+  using dimension_type = Dimension;
+  using array_layout   = Kokkos::LayoutRight;
+
+  dimension_type m_dim;
+
+  //----------------------------------------
+
+  // rank 1
+  template <typename I0>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0) const {
+    return i0;
+  }
+
+  // rank 2
+  template <typename I0, typename I1>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1) const {
+    return i1 + m_dim.N1 * i0;
+  }
+
+  // rank 3
+  template <typename I0, typename I1, typename I2>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2) const {
+    return i2 + m_dim.N2 * (i1 + m_dim.N1 * (i0));
+  }
+
+  // rank 4
+  template <typename I0, typename I1, typename I2, typename I3>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2,
+                                                        I3 const& i3) const {
+    return i3 + m_dim.N3 * (i2 + m_dim.N2 * (i1 + m_dim.N1 * (i0)));
+  }
+
+  // rank 5
+  template <typename I0, typename I1, typename I2, typename I3, typename I4>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2,
+                                                        I3 const& i3,
+                                                        I4 const& i4) const {
+    return i4 + m_dim.N4 *
+                    (i3 + m_dim.N3 * (i2 + m_dim.N2 * (i1 + m_dim.N1 * (i0))));
+  }
+
+  // rank 6
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5) const {
+    return i5 +
+           m_dim.N5 *
+               (i4 +
+                m_dim.N4 *
+                    (i3 + m_dim.N3 * (i2 + m_dim.N2 * (i1 + m_dim.N1 * (i0)))));
+  }
+
+  // rank 7
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5, I6 const& i6) const {
+    return i6 +
+           m_dim.N6 *
+               (i5 +
+                m_dim.N5 *
+                    (i4 +
+                     m_dim.N4 *
+                         (i3 + m_dim.N3 *
+                                   (i2 + m_dim.N2 * (i1 + m_dim.N1 * (i0))))));
+  }
+
+  // rank 8
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename I7>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5, I6 const& i6, I7 const& i7) const {
+    return i7 +
+           m_dim.N7 *
+               (i6 +
+                m_dim.N6 *
+                    (i5 +
+                     m_dim.N5 *
+                         (i4 +
+                          m_dim.N4 *
+                              (i3 +
+                               m_dim.N3 *
+                                   (i2 + m_dim.N2 * (i1 + m_dim.N1 * (i0)))))));
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr array_layout layout() const {
+    constexpr auto r = dimension_type::rank;
+    return array_layout((r > 0 ? m_dim.N0 : KOKKOS_INVALID_INDEX),
+                        (r > 1 ? m_dim.N1 : KOKKOS_INVALID_INDEX),
+                        (r > 2 ? m_dim.N2 : KOKKOS_INVALID_INDEX),
+                        (r > 3 ? m_dim.N3 : KOKKOS_INVALID_INDEX),
+                        (r > 4 ? m_dim.N4 : KOKKOS_INVALID_INDEX),
+                        (r > 5 ? m_dim.N5 : KOKKOS_INVALID_INDEX),
+                        (r > 6 ? m_dim.N6 : KOKKOS_INVALID_INDEX),
+                        (r > 7 ? m_dim.N7 : KOKKOS_INVALID_INDEX));
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_0() const {
+    return m_dim.N0;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_1() const {
+    return m_dim.N1;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_2() const {
+    return m_dim.N2;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_3() const {
+    return m_dim.N3;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_4() const {
+    return m_dim.N4;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_5() const {
+    return m_dim.N5;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_6() const {
+    return m_dim.N6;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_7() const {
+    return m_dim.N7;
+  }
+
+  /* Cardinality of the domain index space */
+  KOKKOS_INLINE_FUNCTION
+  constexpr size_type size() const {
+    return size_type(m_dim.N0) * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4 *
+           m_dim.N5 * m_dim.N6 * m_dim.N7;
+  }
+
+  /* Span of the range space */
+  KOKKOS_INLINE_FUNCTION
+  constexpr size_type span() const {
+    return size_type(m_dim.N0) * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4 *
+           m_dim.N5 * m_dim.N6 * m_dim.N7;
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
+    return true;
+  }
+
+  /* Strides of dimensions */
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_7() const { return 1; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_6() const {
+    return m_dim.N7;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_5() const {
+    return m_dim.N7 * m_dim.N6;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_4() const {
+    return m_dim.N7 * m_dim.N6 * m_dim.N5;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_3() const {
+    return m_dim.N7 * m_dim.N6 * m_dim.N5 * m_dim.N4;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_2() const {
+    return m_dim.N7 * m_dim.N6 * m_dim.N5 * m_dim.N4 * m_dim.N3;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_1() const {
+    return m_dim.N7 * m_dim.N6 * m_dim.N5 * m_dim.N4 * m_dim.N3 * m_dim.N2;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_0() const {
+    return m_dim.N7 * m_dim.N6 * m_dim.N5 * m_dim.N4 * m_dim.N3 * m_dim.N2 *
+           m_dim.N1;
+  }
+
+  // Stride of each dimension; the entry s[ rank ] holds the total length
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+    size_type n = 1;
+    if (7 < dimension_type::rank) {
+      s[7] = n;
+      n *= m_dim.N7;
+    }
+    if (6 < dimension_type::rank) {
+      s[6] = n;
+      n *= m_dim.N6;
+    }
+    if (5 < dimension_type::rank) {
+      s[5] = n;
+      n *= m_dim.N5;
+    }
+    if (4 < dimension_type::rank) {
+      s[4] = n;
+      n *= m_dim.N4;
+    }
+    if (3 < dimension_type::rank) {
+      s[3] = n;
+      n *= m_dim.N3;
+    }
+    if (2 < dimension_type::rank) {
+      s[2] = n;
+      n *= m_dim.N2;
+    }
+    if (1 < dimension_type::rank) {
+      s[1] = n;
+      n *= m_dim.N1;
+    }
+    if (0 < dimension_type::rank) {
+      s[0] = n;
+    }
+    s[dimension_type::rank] = n * m_dim.N0;
+  }
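+
+  // Worked example (illustrative): a LayoutRight rank-3 view with extents
+  // (N0, N1, N2) = (2, 3, 4) yields s = {12, 4, 1} and s[3] = 24, the mirror
+  // image of the LayoutLeft case above.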
+
+  //----------------------------------------
+  // MSVC (16.5.5) + CUDA (10.2) did not generate the defaulted functions
+  // correctly and errored out during compilation. The same applies to the
+  // other places where this workaround is used.
+
+#ifdef KOKKOS_IMPL_WINDOWS_CUDA
+  KOKKOS_FUNCTION ViewOffset() : m_dim(dimension_type()) {}
+  KOKKOS_FUNCTION ViewOffset(const ViewOffset& src) { m_dim = src.m_dim; }
+  KOKKOS_FUNCTION ViewOffset& operator=(const ViewOffset& src) {
+    m_dim = src.m_dim;
+    return *this;
+  }
+#else
+
+  ViewOffset()                  = default;
+  ViewOffset(const ViewOffset&) = default;
+  ViewOffset& operator=(const ViewOffset&) = default;
+#endif
+
+  template <unsigned TrivialScalarSize>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      std::integral_constant<unsigned, TrivialScalarSize> const&,
+      Kokkos::LayoutRight const& arg_layout)
+      : m_dim(arg_layout.dimension[0], 0, 0, 0, 0, 0, 0, 0) {}
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutRight, void>& rhs)
+      : m_dim(rhs.m_dim.N0, rhs.m_dim.N1, rhs.m_dim.N2, rhs.m_dim.N3,
+              rhs.m_dim.N4, rhs.m_dim.N5, rhs.m_dim.N6, rhs.m_dim.N7) {
+    static_assert(int(DimRHS::rank) == int(dimension_type::rank),
+                  "ViewOffset assignment requires equal rank");
+    // Also requires equal static dimensions ...
+  }
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutLeft, void>& rhs)
+      : m_dim(rhs.m_dim.N0, 0, 0, 0, 0, 0, 0, 0) {
+    static_assert((DimRHS::rank == 0 && dimension_type::rank == 0) ||
+                      (DimRHS::rank == 1 && dimension_type::rank == 1 &&
+                       dimension_type::rank_dynamic == 1),
+                  "ViewOffset LayoutRight and LayoutLeft are only compatible "
+                  "when rank <= 1");
+  }
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutStride, void>& rhs)
+      : m_dim(rhs.m_dim.N0, 0, 0, 0, 0, 0, 0, 0) {}
+
+  //----------------------------------------
+  // Subview construction
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutRight, void>&,
+      const SubviewExtents<DimRHS::rank, dimension_type::rank>& sub)
+      : m_dim(sub.range_extent(0), 0, 0, 0, 0, 0, 0, 0) {
+    static_assert((0 == dimension_type::rank_dynamic) ||
+                      (1 == dimension_type::rank &&
+                       1 == dimension_type::rank_dynamic && 1 <= DimRHS::rank),
+                  "ViewOffset subview construction requires compatible rank");
+  }
+};
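+
+// Illustrative usage sketch (assumed example, not part of the bundled
+// source): a fully statically sized view such as
+//
+//   Kokkos::View<int[3][4], Kokkos::LayoutRight> b("b");
+//
+// uses the specialization above; b(i0, i1) resolves to offset i1 + 4 * i0, so
+// consecutive i1 values are adjacent in memory (row-major ordering, as in C).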
+
+//----------------------------------------------------------------------------
+// LayoutRight AND ( 1 < rank AND 0 < rank_dynamic ) : has padding / striding
+template <class Dimension>
+struct ViewOffset<
+    Dimension, Kokkos::LayoutRight,
+    std::enable_if_t<(1 < Dimension::rank && 0 < Dimension::rank_dynamic)>> {
+  using is_mapping_plugin = std::true_type;
+  using is_regular        = std::true_type;
+
+  using size_type      = size_t;
+  using dimension_type = Dimension;
+  using array_layout   = Kokkos::LayoutRight;
+
+  dimension_type m_dim;
+  size_type m_stride;
+
+  //----------------------------------------
+
+  // rank 1
+  template <typename I0>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0) const {
+    return i0;
+  }
+
+  // rank 2
+  template <typename I0, typename I1>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1) const {
+    return i1 + i0 * m_stride;
+  }
+
+  // rank 3
+  template <typename I0, typename I1, typename I2>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2) const {
+    return i2 + m_dim.N2 * (i1) + i0 * m_stride;
+  }
+
+  // rank 4
+  template <typename I0, typename I1, typename I2, typename I3>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2,
+                                                        I3 const& i3) const {
+    return i3 + m_dim.N3 * (i2 + m_dim.N2 * (i1)) + i0 * m_stride;
+  }
+
+  // rank 5
+  template <typename I0, typename I1, typename I2, typename I3, typename I4>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2,
+                                                        I3 const& i3,
+                                                        I4 const& i4) const {
+    return i4 + m_dim.N4 * (i3 + m_dim.N3 * (i2 + m_dim.N2 * (i1))) +
+           i0 * m_stride;
+  }
+
+  // rank 6
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5) const {
+    return i5 +
+           m_dim.N5 *
+               (i4 + m_dim.N4 * (i3 + m_dim.N3 * (i2 + m_dim.N2 * (i1)))) +
+           i0 * m_stride;
+  }
+
+  // rank 7
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5, I6 const& i6) const {
+    return i6 +
+           m_dim.N6 *
+               (i5 + m_dim.N5 *
+                         (i4 + m_dim.N4 *
+                                   (i3 + m_dim.N3 * (i2 + m_dim.N2 * (i1))))) +
+           i0 * m_stride;
+  }
+
+  // rank 8
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename I7>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5, I6 const& i6, I7 const& i7) const {
+    return i7 +
+           m_dim.N7 *
+               (i6 +
+                m_dim.N6 *
+                    (i5 +
+                     m_dim.N5 *
+                         (i4 + m_dim.N4 *
+                                   (i3 + m_dim.N3 * (i2 + m_dim.N2 * (i1)))))) +
+           i0 * m_stride;
+  }
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr array_layout layout() const {
+    constexpr auto r = dimension_type::rank;
+    return array_layout((r > 0 ? m_dim.N0 : KOKKOS_INVALID_INDEX),
+                        (r > 1 ? m_dim.N1 : KOKKOS_INVALID_INDEX),
+                        (r > 2 ? m_dim.N2 : KOKKOS_INVALID_INDEX),
+                        (r > 3 ? m_dim.N3 : KOKKOS_INVALID_INDEX),
+                        (r > 4 ? m_dim.N4 : KOKKOS_INVALID_INDEX),
+                        (r > 5 ? m_dim.N5 : KOKKOS_INVALID_INDEX),
+                        (r > 6 ? m_dim.N6 : KOKKOS_INVALID_INDEX),
+                        (r > 7 ? m_dim.N7 : KOKKOS_INVALID_INDEX));
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_0() const {
+    return m_dim.N0;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_1() const {
+    return m_dim.N1;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_2() const {
+    return m_dim.N2;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_3() const {
+    return m_dim.N3;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_4() const {
+    return m_dim.N4;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_5() const {
+    return m_dim.N5;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_6() const {
+    return m_dim.N6;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_7() const {
+    return m_dim.N7;
+  }
+
+  /* Cardinality of the domain index space */
+  KOKKOS_INLINE_FUNCTION
+  constexpr size_type size() const {
+    return size_type(m_dim.N0) * m_dim.N1 * m_dim.N2 * m_dim.N3 * m_dim.N4 *
+           m_dim.N5 * m_dim.N6 * m_dim.N7;
+  }
+
+  /* Span of the range space */
+  KOKKOS_INLINE_FUNCTION
+  constexpr size_type span() const {
+    return size() > 0 ? size_type(m_dim.N0) * m_stride : 0;
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
+    return m_stride == m_dim.N7 * m_dim.N6 * m_dim.N5 * m_dim.N4 * m_dim.N3 *
+                           m_dim.N2 * m_dim.N1;
+  }
+
+  /* Strides of dimensions */
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_7() const { return 1; }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_6() const {
+    return m_dim.N7;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_5() const {
+    return m_dim.N7 * m_dim.N6;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_4() const {
+    return m_dim.N7 * m_dim.N6 * m_dim.N5;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_3() const {
+    return m_dim.N7 * m_dim.N6 * m_dim.N5 * m_dim.N4;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_2() const {
+    return m_dim.N7 * m_dim.N6 * m_dim.N5 * m_dim.N4 * m_dim.N3;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_1() const {
+    return m_dim.N7 * m_dim.N6 * m_dim.N5 * m_dim.N4 * m_dim.N3 * m_dim.N2;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_0() const {
+    return m_stride;
+  }
+
+  // Stride of each dimension; the entry s[ rank ] holds the total length
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+    size_type n = 1;
+    if (7 < dimension_type::rank) {
+      s[7] = n;
+      n *= m_dim.N7;
+    }
+    if (6 < dimension_type::rank) {
+      s[6] = n;
+      n *= m_dim.N6;
+    }
+    if (5 < dimension_type::rank) {
+      s[5] = n;
+      n *= m_dim.N5;
+    }
+    if (4 < dimension_type::rank) {
+      s[4] = n;
+      n *= m_dim.N4;
+    }
+    if (3 < dimension_type::rank) {
+      s[3] = n;
+      n *= m_dim.N3;
+    }
+    if (2 < dimension_type::rank) {
+      s[2] = n;
+      n *= m_dim.N2;
+    }
+    if (1 < dimension_type::rank) {
+      s[1] = n;
+    }
+    if (0 < dimension_type::rank) {
+      s[0] = m_stride;
+    }
+    s[dimension_type::rank] = m_stride * m_dim.N0;
+  }
+
+  //----------------------------------------
+
+ private:
+  template <unsigned TrivialScalarSize>
+  struct Padding {
+    enum {
+      div = TrivialScalarSize == 0
+                ? 0
+                : Kokkos::Impl::MEMORY_ALIGNMENT /
+                      (TrivialScalarSize ? TrivialScalarSize : 1)
+    };
+    enum {
+      mod = TrivialScalarSize == 0
+                ? 0
+                : Kokkos::Impl::MEMORY_ALIGNMENT %
+                      (TrivialScalarSize ? TrivialScalarSize : 1)
+    };
+
+    // If memory alignment is a multiple of the trivial scalar size then attempt
+    // to align.
+    enum { align = 0 != TrivialScalarSize && 0 == mod ? div : 0 };
+    enum {
+      div_ok = (div != 0) ? div : 1
+    };  // To avoid modulo by zero in constexpr evaluation
+
+    KOKKOS_INLINE_FUNCTION
+    static constexpr size_t stride(size_t const N) {
+      return ((align != 0) &&
+              ((static_cast<int>(Kokkos::Impl::MEMORY_ALIGNMENT_THRESHOLD) *
+                static_cast<int>(align)) < N) &&
+              ((N % div_ok) != 0))
+                 ? N + align - (N % div_ok)
+                 : N;
+    }
+  };
+
+ public:
+  // MSVC (16.5.5) + CUDA (10.2) did not generate the defaulted functions
+  // correctly and errored out during compilation. The same applies to the
+  // other places where this workaround is used.
+
+#ifdef KOKKOS_IMPL_WINDOWS_CUDA
+  KOKKOS_FUNCTION ViewOffset() : m_dim(dimension_type()), m_stride(0) {}
+  KOKKOS_FUNCTION ViewOffset(const ViewOffset& src) {
+    m_dim    = src.m_dim;
+    m_stride = src.m_stride;
+  }
+  KOKKOS_FUNCTION ViewOffset& operator=(const ViewOffset& src) {
+    m_dim    = src.m_dim;
+    m_stride = src.m_stride;
+    return *this;
+  }
+#else
+
+  ViewOffset()                  = default;
+  ViewOffset(const ViewOffset&) = default;
+  ViewOffset& operator=(const ViewOffset&) = default;
+#endif
+
+  /* Enable padding for trivial scalar types with non-zero trivial scalar size.
+   */
+  template <unsigned TrivialScalarSize>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      std::integral_constant<unsigned, TrivialScalarSize> const&,
+      Kokkos::LayoutRight const& arg_layout)
+      : m_dim(arg_layout.dimension[0], arg_layout.dimension[1],
+              arg_layout.dimension[2], arg_layout.dimension[3],
+              arg_layout.dimension[4], arg_layout.dimension[5],
+              arg_layout.dimension[6], arg_layout.dimension[7]),
+        m_stride(Padding<TrivialScalarSize>::stride(
+            /* 2 <= rank */
+            m_dim.N1 *
+            (dimension_type::rank == 2
+                 ? size_t(1)
+                 : m_dim.N2 *
+                       (dimension_type::rank == 3
+                            ? size_t(1)
+                            : m_dim.N3 *
+                                  (dimension_type::rank == 4
+                                       ? size_t(1)
+                                       : m_dim.N4 *
+                                             (dimension_type::rank == 5
+                                                  ? size_t(1)
+                                                  : m_dim.N5 *
+                                                        (dimension_type::rank == 6
+                                                             ? size_t(1)
+                                                             : m_dim.N6 *
+                                                                   (dimension_type::rank == 7
+                                                                        ? size_t(1)
+                                                                        : m_dim.N7)))))))) {}
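+
+  // In words (illustrative): for 2 <= rank the expression above forms the
+  // product N1 * N2 * ... * N(rank-1), the length of one slice indexed by i0,
+  // and pads it via Padding<>::stride. E.g. a rank-2 view of 8-byte scalars
+  // with N1 == 1001 would get m_stride == 1008 under 64-byte alignment
+  // (assumed configuration).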
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutRight, void>& rhs)
+      : m_dim(rhs.m_dim.N0, rhs.m_dim.N1, rhs.m_dim.N2, rhs.m_dim.N3,
+              rhs.m_dim.N4, rhs.m_dim.N5, rhs.m_dim.N6, rhs.m_dim.N7),
+        m_stride(rhs.stride_0()) {
+    static_assert(int(DimRHS::rank) == int(dimension_type::rank),
+                  "ViewOffset assignment requires equal rank");
+    // Also requires equal static dimensions ...
+  }
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutStride, void>& rhs)
+      : m_dim(rhs.m_dim.N0, rhs.m_dim.N1, rhs.m_dim.N2, rhs.m_dim.N3,
+              rhs.m_dim.N4, rhs.m_dim.N5, rhs.m_dim.N6, rhs.m_dim.N7),
+        m_stride(rhs.stride_0()) {
+    if (((dimension_type::rank == 2)
+             ? rhs.m_stride.S1
+             : ((dimension_type::rank == 3)
+                    ? rhs.m_stride.S2
+                    : ((dimension_type::rank == 4)
+                           ? rhs.m_stride.S3
+                           : ((dimension_type::rank == 5)
+                                  ? rhs.m_stride.S4
+                                  : ((dimension_type::rank == 6)
+                                         ? rhs.m_stride.S5
+                                         : ((dimension_type::rank == 7)
+                                                ? rhs.m_stride.S6
+                                                : rhs.m_stride.S7)))))) != 1) {
+      Kokkos::abort(
+          "Kokkos::Impl::ViewOffset assignment of LayoutRight from "
+          "LayoutStride requires right-most stride == 1");
+    }
+  }
+
+  //----------------------------------------
+  // Subview construction
+  // Last dimension must be non-zero
+
+  template <class DimRHS>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      const ViewOffset<DimRHS, Kokkos::LayoutRight, void>& rhs,
+      const SubviewExtents<DimRHS::rank, dimension_type::rank>& sub)
+      : m_dim(sub.range_extent(0), sub.range_extent(1), sub.range_extent(2),
+              sub.range_extent(3), sub.range_extent(4), sub.range_extent(5),
+              sub.range_extent(6), sub.range_extent(7)),
+        m_stride(
+            0 == sub.range_index(0)
+                ? rhs.stride_0()
+                : (1 == sub.range_index(0)
+                       ? rhs.stride_1()
+                       : (2 == sub.range_index(0)
+                              ? rhs.stride_2()
+                              : (3 == sub.range_index(0)
+                                     ? rhs.stride_3()
+                                     : (4 == sub.range_index(0)
+                                            ? rhs.stride_4()
+                                            : (5 == sub.range_index(0)
+                                                   ? rhs.stride_5()
+                                                   : (6 == sub.range_index(0)
+                                                          ? rhs.stride_6()
+                                                          : 0))))))) {
+    /* This subview must have 2 == rank and 2 == rank_dynamic
+       because it carries only stride #0.
+       The source dimension #0 must be non-zero for a stride-one leading
+       dimension, and at most one subsequent dimension can be non-zero.
+
+       static_assert((2 == dimension_type::rank) &&
+                         (2 <= DimRHS::rank),
+                     "ViewOffset subview construction requires compatible "
+                     "rank");
+    */
+  }
+};
+
+//----------------------------------------------------------------------------
+/* Strided array layout only makes sense for 0 < rank */
+/* rank = 0 included for DynRankView case */
+
+template <unsigned Rank>
+struct ViewStride;
+
+template <>
+struct ViewStride<0> {
+  enum { S0 = 0, S1 = 0, S2 = 0, S3 = 0, S4 = 0, S5 = 0, S6 = 0, S7 = 0 };
+
+  ViewStride()                  = default;
+  ViewStride(const ViewStride&) = default;
+  ViewStride& operator=(const ViewStride&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ViewStride(size_t, size_t, size_t, size_t, size_t, size_t, size_t,
+                       size_t) {}
+};
+
+template <>
+struct ViewStride<1> {
+  size_t S0;
+  enum { S1 = 0, S2 = 0, S3 = 0, S4 = 0, S5 = 0, S6 = 0, S7 = 0 };
+
+  ViewStride()                  = default;
+  ViewStride(const ViewStride&) = default;
+  ViewStride& operator=(const ViewStride&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ViewStride(size_t aS0, size_t, size_t, size_t, size_t, size_t,
+                       size_t, size_t)
+      : S0(aS0) {}
+};
+
+template <>
+struct ViewStride<2> {
+  size_t S0, S1;
+  enum { S2 = 0, S3 = 0, S4 = 0, S5 = 0, S6 = 0, S7 = 0 };
+
+  ViewStride()                  = default;
+  ViewStride(const ViewStride&) = default;
+  ViewStride& operator=(const ViewStride&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ViewStride(size_t aS0, size_t aS1, size_t, size_t, size_t, size_t,
+                       size_t, size_t)
+      : S0(aS0), S1(aS1) {}
+};
+
+template <>
+struct ViewStride<3> {
+  size_t S0, S1, S2;
+  enum { S3 = 0, S4 = 0, S5 = 0, S6 = 0, S7 = 0 };
+
+  ViewStride()                  = default;
+  ViewStride(const ViewStride&) = default;
+  ViewStride& operator=(const ViewStride&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ViewStride(size_t aS0, size_t aS1, size_t aS2, size_t, size_t,
+                       size_t, size_t, size_t)
+      : S0(aS0), S1(aS1), S2(aS2) {}
+};
+
+template <>
+struct ViewStride<4> {
+  size_t S0, S1, S2, S3;
+  enum { S4 = 0, S5 = 0, S6 = 0, S7 = 0 };
+
+  ViewStride()                  = default;
+  ViewStride(const ViewStride&) = default;
+  ViewStride& operator=(const ViewStride&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ViewStride(size_t aS0, size_t aS1, size_t aS2, size_t aS3, size_t,
+                       size_t, size_t, size_t)
+      : S0(aS0), S1(aS1), S2(aS2), S3(aS3) {}
+};
+
+template <>
+struct ViewStride<5> {
+  size_t S0, S1, S2, S3, S4;
+  enum { S5 = 0, S6 = 0, S7 = 0 };
+
+  ViewStride()                  = default;
+  ViewStride(const ViewStride&) = default;
+  ViewStride& operator=(const ViewStride&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ViewStride(size_t aS0, size_t aS1, size_t aS2, size_t aS3,
+                       size_t aS4, size_t, size_t, size_t)
+      : S0(aS0), S1(aS1), S2(aS2), S3(aS3), S4(aS4) {}
+};
+
+template <>
+struct ViewStride<6> {
+  size_t S0, S1, S2, S3, S4, S5;
+  enum { S6 = 0, S7 = 0 };
+
+  ViewStride()                  = default;
+  ViewStride(const ViewStride&) = default;
+  ViewStride& operator=(const ViewStride&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ViewStride(size_t aS0, size_t aS1, size_t aS2, size_t aS3,
+                       size_t aS4, size_t aS5, size_t, size_t)
+      : S0(aS0), S1(aS1), S2(aS2), S3(aS3), S4(aS4), S5(aS5) {}
+};
+
+template <>
+struct ViewStride<7> {
+  size_t S0, S1, S2, S3, S4, S5, S6;
+  enum { S7 = 0 };
+
+  ViewStride()                  = default;
+  ViewStride(const ViewStride&) = default;
+  ViewStride& operator=(const ViewStride&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ViewStride(size_t aS0, size_t aS1, size_t aS2, size_t aS3,
+                       size_t aS4, size_t aS5, size_t aS6, size_t)
+      : S0(aS0), S1(aS1), S2(aS2), S3(aS3), S4(aS4), S5(aS5), S6(aS6) {}
+};
+
+template <>
+struct ViewStride<8> {
+  size_t S0, S1, S2, S3, S4, S5, S6, S7;
+
+  ViewStride()                  = default;
+  ViewStride(const ViewStride&) = default;
+  ViewStride& operator=(const ViewStride&) = default;
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ViewStride(size_t aS0, size_t aS1, size_t aS2, size_t aS3,
+                       size_t aS4, size_t aS5, size_t aS6, size_t aS7)
+      : S0(aS0),
+        S1(aS1),
+        S2(aS2),
+        S3(aS3),
+        S4(aS4),
+        S5(aS5),
+        S6(aS6),
+        S7(aS7) {}
+};
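+
+// Note on the pattern above: ViewStride<R> stores exactly R size_t members
+// and exposes the unused S(R)..S7 as enum constants equal to zero, so a
+// rank-R LayoutStride offset pays only for the strides it actually uses
+// while the generic rank-8 arithmetic below still compiles.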
+
+template <class Dimension>
+struct ViewOffset<Dimension, Kokkos::LayoutStride, void> {
+ private:
+  using stride_type = ViewStride<Dimension::rank>;
+
+ public:
+  using is_mapping_plugin = std::true_type;
+  using is_regular        = std::true_type;
+
+  using size_type      = size_t;
+  using dimension_type = Dimension;
+  using array_layout   = Kokkos::LayoutStride;
+
+  dimension_type m_dim;
+  stride_type m_stride;
+
+  //----------------------------------------
+
+  // rank 1
+  template <typename I0>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0) const {
+    return i0 * m_stride.S0;
+  }
+
+  // rank 2
+  template <typename I0, typename I1>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1) const {
+    return i0 * m_stride.S0 + i1 * m_stride.S1;
+  }
+
+  // rank 3
+  template <typename I0, typename I1, typename I2>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2) const {
+    return i0 * m_stride.S0 + i1 * m_stride.S1 + i2 * m_stride.S2;
+  }
+
+  // rank 4
+  template <typename I0, typename I1, typename I2, typename I3>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2,
+                                                        I3 const& i3) const {
+    return i0 * m_stride.S0 + i1 * m_stride.S1 + i2 * m_stride.S2 +
+           i3 * m_stride.S3;
+  }
+
+  // rank 5
+  template <typename I0, typename I1, typename I2, typename I3, typename I4>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(I0 const& i0,
+                                                        I1 const& i1,
+                                                        I2 const& i2,
+                                                        I3 const& i3,
+                                                        I4 const& i4) const {
+    return i0 * m_stride.S0 + i1 * m_stride.S1 + i2 * m_stride.S2 +
+           i3 * m_stride.S3 + i4 * m_stride.S4;
+  }
+
+  // rank 6
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5) const {
+    return i0 * m_stride.S0 + i1 * m_stride.S1 + i2 * m_stride.S2 +
+           i3 * m_stride.S3 + i4 * m_stride.S4 + i5 * m_stride.S5;
+  }
+
+  // rank 7
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5, I6 const& i6) const {
+    return i0 * m_stride.S0 + i1 * m_stride.S1 + i2 * m_stride.S2 +
+           i3 * m_stride.S3 + i4 * m_stride.S4 + i5 * m_stride.S5 +
+           i6 * m_stride.S6;
+  }
+
+  // rank 8
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename I7>
+  KOKKOS_INLINE_FUNCTION constexpr size_type operator()(
+      I0 const& i0, I1 const& i1, I2 const& i2, I3 const& i3, I4 const& i4,
+      I5 const& i5, I6 const& i6, I7 const& i7) const {
+    return i0 * m_stride.S0 + i1 * m_stride.S1 + i2 * m_stride.S2 +
+           i3 * m_stride.S3 + i4 * m_stride.S4 + i5 * m_stride.S5 +
+           i6 * m_stride.S6 + i7 * m_stride.S7;
+  }
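+
+  // All of the operators above evaluate the same dot product,
+  //   offset(i0, .., i7) = i0 * S0 + i1 * S1 + ... + i7 * S7,
+  // which is what lets LayoutStride describe any LayoutLeft or LayoutRight
+  // (possibly padded) mapping as well as more general ones.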
+
+  //----------------------------------------
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr array_layout layout() const {
+    constexpr auto r = dimension_type::rank;
+    return array_layout((r > 0 ? m_dim.N0 : KOKKOS_INVALID_INDEX), m_stride.S0,
+                        (r > 1 ? m_dim.N1 : KOKKOS_INVALID_INDEX), m_stride.S1,
+                        (r > 2 ? m_dim.N2 : KOKKOS_INVALID_INDEX), m_stride.S2,
+                        (r > 3 ? m_dim.N3 : KOKKOS_INVALID_INDEX), m_stride.S3,
+                        (r > 4 ? m_dim.N4 : KOKKOS_INVALID_INDEX), m_stride.S4,
+                        (r > 5 ? m_dim.N5 : KOKKOS_INVALID_INDEX), m_stride.S5,
+                        (r > 6 ? m_dim.N6 : KOKKOS_INVALID_INDEX), m_stride.S6,
+                        (r > 7 ? m_dim.N7 : KOKKOS_INVALID_INDEX), m_stride.S7);
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_0() const {
+    return m_dim.N0;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_1() const {
+    return m_dim.N1;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_2() const {
+    return m_dim.N2;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_3() const {
+    return m_dim.N3;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_4() const {
+    return m_dim.N4;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_5() const {
+    return m_dim.N5;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_6() const {
+    return m_dim.N6;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type dimension_7() const {
+    return m_dim.N7;
+  }
+
+  /* Cardinality of the domain index space */
+  KOKKOS_INLINE_FUNCTION
+  constexpr size_type size() const {
+    return dimension_type::rank == 0
+               ? 1
+               : size_type(m_dim.N0) * m_dim.N1 * m_dim.N2 * m_dim.N3 *
+                     m_dim.N4 * m_dim.N5 * m_dim.N6 * m_dim.N7;
+  }
+
+ private:
+  KOKKOS_INLINE_FUNCTION
+  static constexpr size_type Max(size_type lhs, size_type rhs) {
+    return lhs < rhs ? rhs : lhs;
+  }
+
+ public:
+  /* Span of the range space, largest stride * dimension */
+  KOKKOS_INLINE_FUNCTION
+  constexpr size_type span() const {
+    return dimension_type::rank == 0
+               ? 1
+               : (size() == size_type(0)
+                      ? size_type(0)
+                      : Max(m_dim.N0 * m_stride.S0,
+                            Max(m_dim.N1 * m_stride.S1,
+                                Max(m_dim.N2 * m_stride.S2,
+                                    Max(m_dim.N3 * m_stride.S3,
+                                        Max(m_dim.N4 * m_stride.S4,
+                                            Max(m_dim.N5 * m_stride.S5,
+                                                Max(m_dim.N6 * m_stride.S6,
+                                                    m_dim.N7 *
+                                                        m_stride.S7))))))));
+  }
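+
+  // Example (illustrative): extents (N0, N1) = (3, 4) with strides
+  // (S0, S1) = (1, 5) give size() == 12 but span() == max(3 * 1, 4 * 5) == 20,
+  // so span_is_contiguous() below reports false.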
+
+  KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
+    return span() == size();
+  }
+
+  /* Strides of dimensions */
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_0() const {
+    return m_stride.S0;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_1() const {
+    return m_stride.S1;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_2() const {
+    return m_stride.S2;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_3() const {
+    return m_stride.S3;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_4() const {
+    return m_stride.S4;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_5() const {
+    return m_stride.S5;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_6() const {
+    return m_stride.S6;
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_type stride_7() const {
+    return m_stride.S7;
+  }
+
+  // Stride of each dimension; the entry s[ rank ] holds the total length
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+    if (0 < dimension_type::rank) {
+      s[0] = m_stride.S0;
+    }
+    if (1 < dimension_type::rank) {
+      s[1] = m_stride.S1;
+    }
+    if (2 < dimension_type::rank) {
+      s[2] = m_stride.S2;
+    }
+    if (3 < dimension_type::rank) {
+      s[3] = m_stride.S3;
+    }
+    if (4 < dimension_type::rank) {
+      s[4] = m_stride.S4;
+    }
+    if (5 < dimension_type::rank) {
+      s[5] = m_stride.S5;
+    }
+    if (6 < dimension_type::rank) {
+      s[6] = m_stride.S6;
+    }
+    if (7 < dimension_type::rank) {
+      s[7] = m_stride.S7;
+    }
+    s[dimension_type::rank] = span();
+  }
+
+  //----------------------------------------
+  // MSVC (16.5.5) + CUDA (10.2) did not generate the defaulted functions
+  // correctly and errored out during compilation. The same applies to the
+  // other places where this workaround is used.
+
+#ifdef KOKKOS_IMPL_WINDOWS_CUDA
+  KOKKOS_FUNCTION ViewOffset()
+      : m_dim(dimension_type()), m_stride(stride_type()) {}
+  KOKKOS_FUNCTION ViewOffset(const ViewOffset& src) {
+    m_dim    = src.m_dim;
+    m_stride = src.m_stride;
+  }
+  KOKKOS_FUNCTION ViewOffset& operator=(const ViewOffset& src) {
+    m_dim    = src.m_dim;
+    m_stride = src.m_stride;
+    return *this;
+  }
+#else
+
+  ViewOffset()                  = default;
+  ViewOffset(const ViewOffset&) = default;
+  ViewOffset& operator=(const ViewOffset&) = default;
+#endif
+
+  KOKKOS_INLINE_FUNCTION
+  constexpr ViewOffset(std::integral_constant<unsigned, 0> const&,
+                       Kokkos::LayoutStride const& rhs)
+      : m_dim(rhs.dimension[0], rhs.dimension[1], rhs.dimension[2],
+              rhs.dimension[3], rhs.dimension[4], rhs.dimension[5],
+              rhs.dimension[6], rhs.dimension[7]),
+        m_stride(rhs.stride[0], rhs.stride[1], rhs.stride[2], rhs.stride[3],
+                 rhs.stride[4], rhs.stride[5], rhs.stride[6], rhs.stride[7]) {}
+
+  template <class DimRHS, class LayoutRHS>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      const ViewOffset<DimRHS, LayoutRHS, void>& rhs)
+      : m_dim(rhs.m_dim.N0, rhs.m_dim.N1, rhs.m_dim.N2, rhs.m_dim.N3,
+              rhs.m_dim.N4, rhs.m_dim.N5, rhs.m_dim.N6, rhs.m_dim.N7),
+        m_stride(rhs.stride_0(), rhs.stride_1(), rhs.stride_2(), rhs.stride_3(),
+                 rhs.stride_4(), rhs.stride_5(), rhs.stride_6(),
+                 rhs.stride_7()) {
+    static_assert(int(DimRHS::rank) == int(dimension_type::rank),
+                  "ViewOffset assignment requires equal rank");
+    // Also requires equal static dimensions ...
+  }
+
+  //----------------------------------------
+  // Subview construction
+
+ private:
+  template <class DimRHS, class LayoutRHS>
+  KOKKOS_INLINE_FUNCTION static constexpr size_t stride(
+      unsigned r, const ViewOffset<DimRHS, LayoutRHS, void>& rhs) {
+    return r > 7
+               ? 0
+               : (r == 0
+                      ? rhs.stride_0()
+                      : (r == 1
+                             ? rhs.stride_1()
+                             : (r == 2
+                                    ? rhs.stride_2()
+                                    : (r == 3
+                                           ? rhs.stride_3()
+                                           : (r == 4
+                                                  ? rhs.stride_4()
+                                                  : (r == 5
+                                                         ? rhs.stride_5()
+                                                         : (r == 6
+                                                                ? rhs.stride_6()
+                                                                : rhs.stride_7())))))));
+  }
+
+ public:
+  template <class DimRHS, class LayoutRHS>
+  KOKKOS_INLINE_FUNCTION constexpr ViewOffset(
+      const ViewOffset<DimRHS, LayoutRHS, void>& rhs,
+      const SubviewExtents<DimRHS::rank, dimension_type::rank>& sub)
+      // range_extent(r) returns 0 when dimension_type::rank <= r
+      : m_dim(sub.range_extent(0), sub.range_extent(1), sub.range_extent(2),
+              sub.range_extent(3), sub.range_extent(4), sub.range_extent(5),
+              sub.range_extent(6), sub.range_extent(7))
+        // range_index(r) returns ~0u when dimension_type::rank <= r
+        ,
+        m_stride(
+            stride(sub.range_index(0), rhs), stride(sub.range_index(1), rhs),
+            stride(sub.range_index(2), rhs), stride(sub.range_index(3), rhs),
+            stride(sub.range_index(4), rhs), stride(sub.range_index(5), rhs),
+            stride(sub.range_index(6), rhs), stride(sub.range_index(7), rhs)) {}
+};
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+/** \brief  ViewDataHandle provides the type of the 'data handle' which the view
+ *          uses to access data with the [] operator. It also provides
+ *          an allocate function and a function to extract a raw ptr from the
+ *          data handle. ViewDataHandle also defines an enum ReferenceAble which
+ *          specifies whether references/pointers to elements can be taken and a
+ *          'return_type' which is what the view operators will give back.
+ *          Specialization of this object allows three things depending
+ *          on ViewTraits and compiler options:
+ *          (i)   use a special allocator (e.g. huge pages/small pages and
+ *                pinned memory),
+ *          (ii)  use a special data handle type (e.g. add a Cuda Texture
+ *                Object),
+ *          (iii) use special access intrinsics (e.g. texture fetch and
+ *                non-caching loads).
+ */
+template <class Traits, class Enable = void>
+struct ViewDataHandle {
+  using value_type  = typename Traits::value_type;
+  using handle_type = typename Traits::value_type*;
+  using return_type = typename Traits::value_type&;
+  using track_type  = Kokkos::Impl::SharedAllocationTracker;
+
+  KOKKOS_INLINE_FUNCTION
+  static handle_type assign(value_type* arg_data_ptr,
+                            track_type const& /*arg_tracker*/) {
+    return handle_type(arg_data_ptr);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static handle_type assign(handle_type const arg_data_ptr, size_t offset) {
+    return handle_type(arg_data_ptr + offset);
+  }
+};
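+
+// A usage sketch (names illustrative, not part of the interface): a mapping
+// holds a handle_type and re-bases it by an element offset via assign():
+//
+//   using DH = ViewDataHandle<Traits>;
+//   typename DH::handle_type h = DH::assign(ptr, tracker);  // wrap raw ptr
+//   typename DH::handle_type s = DH::assign(h, offset);     // h + offset
+//
+// The specializations below swap in atomic, restrict-qualified, and aligned
+// handle types when the corresponding memory traits are set.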
+
+template <class Traits>
+struct ViewDataHandle<
+    Traits,
+    std::enable_if_t<(std::is_same<typename Traits::non_const_value_type,
+                                   typename Traits::value_type>::value &&
+                      std::is_void<typename Traits::specialize>::value &&
+                      Traits::memory_traits::is_atomic)>> {
+  using value_type  = typename Traits::value_type;
+  using handle_type = typename Kokkos::Impl::AtomicViewDataHandle<Traits>;
+  using return_type = typename Kokkos::Impl::AtomicDataElement<Traits>;
+  using track_type  = Kokkos::Impl::SharedAllocationTracker;
+
+  KOKKOS_INLINE_FUNCTION
+  static handle_type assign(value_type* arg_data_ptr,
+                            track_type const& /*arg_tracker*/) {
+    return handle_type(arg_data_ptr);
+  }
+
+  template <class SrcHandleType>
+  KOKKOS_INLINE_FUNCTION static handle_type assign(
+      const SrcHandleType& arg_handle, size_t offset) {
+    return handle_type(arg_handle + offset);
+  }
+};
+
+template <class Traits>
+struct ViewDataHandle<
+    Traits,
+    std::enable_if_t<(std::is_void<typename Traits::specialize>::value &&
+                      (!Traits::memory_traits::is_aligned) &&
+                      Traits::memory_traits::is_restrict
+#ifdef KOKKOS_ENABLE_CUDA
+                      && (!(std::is_same<typename Traits::memory_space,
+                                         Kokkos::CudaSpace>::value ||
+                            std::is_same<typename Traits::memory_space,
+                                         Kokkos::CudaUVMSpace>::value))
+#endif
+                      && (!Traits::memory_traits::is_atomic))>> {
+  using value_type  = typename Traits::value_type;
+  using handle_type = typename Traits::value_type* KOKKOS_RESTRICT;
+  using return_type = typename Traits::value_type& KOKKOS_RESTRICT;
+  using track_type  = Kokkos::Impl::SharedAllocationTracker;
+
+  KOKKOS_INLINE_FUNCTION
+  static value_type* assign(value_type* arg_data_ptr,
+                            track_type const& /*arg_tracker*/) {
+    return (value_type*)(arg_data_ptr);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static value_type* assign(handle_type const arg_data_ptr, size_t offset) {
+    return (value_type*)(arg_data_ptr + offset);
+  }
+};
+
+template <class Traits>
+struct ViewDataHandle<
+    Traits,
+    std::enable_if_t<(std::is_void<typename Traits::specialize>::value &&
+                      Traits::memory_traits::is_aligned &&
+                      (!Traits::memory_traits::is_restrict)
+#ifdef KOKKOS_ENABLE_CUDA
+                      && (!(std::is_same<typename Traits::memory_space,
+                                         Kokkos::CudaSpace>::value ||
+                            std::is_same<typename Traits::memory_space,
+                                         Kokkos::CudaUVMSpace>::value))
+#endif
+                      && (!Traits::memory_traits::is_atomic))>> {
+  using value_type = typename Traits::value_type;
+  // typedef work-around for intel compilers error #3186: expected typedef
+  // declaration
+  // NOLINTNEXTLINE(modernize-use-using)
+  typedef value_type* KOKKOS_IMPL_ALIGN_PTR(KOKKOS_MEMORY_ALIGNMENT)
+      handle_type;
+  using return_type = typename Traits::value_type&;
+  using track_type  = Kokkos::Impl::SharedAllocationTracker;
+
+  KOKKOS_INLINE_FUNCTION
+  static handle_type assign(value_type* arg_data_ptr,
+                            track_type const& /*arg_tracker*/) {
+    if (reinterpret_cast<uintptr_t>(arg_data_ptr) % Impl::MEMORY_ALIGNMENT) {
+      Kokkos::abort(
+          "Assigning NonAligned View or Pointer to Kokkos::View with Aligned "
+          "attribute");
+    }
+    return handle_type(arg_data_ptr);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static handle_type assign(handle_type const arg_data_ptr, size_t offset) {
+    if (reinterpret_cast<uintptr_t>(arg_data_ptr + offset) %
+        Impl::MEMORY_ALIGNMENT) {
+      Kokkos::abort(
+          "Assigning NonAligned View or Pointer to Kokkos::View with Aligned "
+          "attribute");
+    }
+    return handle_type(arg_data_ptr + offset);
+  }
+};
+
+template <class Traits>
+struct ViewDataHandle<
+    Traits,
+    std::enable_if_t<(std::is_void<typename Traits::specialize>::value &&
+                      Traits::memory_traits::is_aligned &&
+                      Traits::memory_traits::is_restrict
+#ifdef KOKKOS_ENABLE_CUDA
+                      && (!(std::is_same<typename Traits::memory_space,
+                                         Kokkos::CudaSpace>::value ||
+                            std::is_same<typename Traits::memory_space,
+                                         Kokkos::CudaUVMSpace>::value))
+#endif
+                      && (!Traits::memory_traits::is_atomic))>> {
+  using value_type = typename Traits::value_type;
+  // typedef work-around for intel compilers error #3186: expected typedef
+  // declaration
+  // NOLINTNEXTLINE(modernize-use-using)
+  typedef value_type* KOKKOS_IMPL_ALIGN_PTR(KOKKOS_MEMORY_ALIGNMENT)
+      handle_type;
+  using return_type = typename Traits::value_type& KOKKOS_RESTRICT;
+  using track_type  = Kokkos::Impl::SharedAllocationTracker;
+
+  KOKKOS_INLINE_FUNCTION
+  static value_type* assign(value_type* arg_data_ptr,
+                            track_type const& /*arg_tracker*/) {
+    if (reinterpret_cast<uintptr_t>(arg_data_ptr) % Impl::MEMORY_ALIGNMENT) {
+      Kokkos::abort(
+          "Assigning NonAligned View or Pointer to Kokkos::View with Aligned "
+          "attribute");
+    }
+    return (value_type*)(arg_data_ptr);
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  static value_type* assign(handle_type const arg_data_ptr, size_t offset) {
+    if (reinterpret_cast<uintptr_t>(arg_data_ptr + offset) %
+        Impl::MEMORY_ALIGNMENT) {
+      Kokkos::abort(
+          "Assigning NonAligned View or Pointer to Kokkos::View with Aligned "
+          "attribute");
+    }
+    return (value_type*)(arg_data_ptr + offset);
+  }
+};
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <typename T>
+inline bool is_zero_byte(const T& t) {
+  using comparison_type = std::conditional_t<
+      sizeof(T) % sizeof(long long int) == 0, long long int,
+      std::conditional_t<
+          sizeof(T) % sizeof(long int) == 0, long int,
+          std::conditional_t<
+              sizeof(T) % sizeof(int) == 0, int,
+              std::conditional_t<sizeof(T) % sizeof(short int) == 0, short int,
+                                 char>>>>;
+  const auto* const ptr = reinterpret_cast<const comparison_type*>(&t);
+  for (std::size_t i = 0; i < sizeof(T) / sizeof(comparison_type); ++i)
+    if (ptr[i] != 0) return false;
+  return true;
+}
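+
+// For example (a sketch): a value-initialized, padding-free trivial type is
+// all zero bytes, which is what lets View initialization fall back to a
+// memset further below:
+//
+//   struct P { int a; int b; };
+//   is_zero_byte(P{});      // true: every byte of the representation is 0
+//   is_zero_byte(P{1, 0});  // false: the first int is non-zero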
+
+//----------------------------------------------------------------------------
+
+/*
+ *  The construction, assignment to default, and destruction
+ *  are merged into a single functor.
+ *  Primarily this works around an unresolved CUDA back-end bug
+ *  that would lose the destruction CUDA device function when
+ *  called from the shared memory tracking destruction.
+ *  Secondarily it saves two partial specializations.
+ */
+template <class DeviceType, class ValueType,
+          bool IsScalar = std::is_scalar<ValueType>::value>
+struct ViewValueFunctor;
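+
+// In outline (a sketch; in practice the SharedAllocationRecord owns the
+// functor and drives these calls):
+//
+//   ViewValueFunctor<Device, T> f(space, ptr, n, "label");
+//   f.construct_shared_allocation();  // placement-new (or memset) n values
+//   // ... view lifetime ...
+//   f.destroy_shared_allocation();    // runs destructors when required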
+
+template <class DeviceType, class ValueType>
+struct ViewValueFunctor<DeviceType, ValueType, false /* is_scalar */> {
+  using ExecSpace  = typename DeviceType::execution_space;
+  using PolicyType = Kokkos::RangePolicy<ExecSpace, Kokkos::IndexType<int64_t>>;
+
+  ExecSpace space;
+  ValueType* ptr;
+  size_t n;
+  bool destroy;
+  std::string name;
+  bool default_exec_space;
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const size_t i) const {
+    if (destroy) {
+      (ptr + i)->~ValueType();
+    }  // KOKKOS_IMPL_CUDA_CLANG_WORKAROUND this line causes a ptxas error:
+       // __cxa_begin_catch in nested_view unit-test
+    else {
+      new (ptr + i) ValueType();
+    }
+  }
+
+  ViewValueFunctor()                        = default;
+  ViewValueFunctor(const ViewValueFunctor&) = default;
+  ViewValueFunctor& operator=(const ViewValueFunctor&) = default;
+
+  ViewValueFunctor(ExecSpace const& arg_space, ValueType* const arg_ptr,
+                   size_t const arg_n, std::string arg_name)
+      : space(arg_space),
+        ptr(arg_ptr),
+        n(arg_n),
+        destroy(false),
+        name(std::move(arg_name)),
+        default_exec_space(false) {}
+
+  ViewValueFunctor(ValueType* const arg_ptr, size_t const arg_n,
+                   std::string arg_name)
+      : space(ExecSpace{}),
+        ptr(arg_ptr),
+        n(arg_n),
+        destroy(false),
+        name(std::move(arg_name)),
+        default_exec_space(true) {}
+
+  template <typename Dummy = ValueType>
+  std::enable_if_t<std::is_trivial<Dummy>::value &&
+                   std::is_trivially_copy_assignable<ValueType>::value>
+  construct_dispatch() {
+    ValueType value{};
+// On A64FX, memset seems to do the wrong thing with regard to first touch,
+// leading to significant performance issues
+#ifndef KOKKOS_ARCH_A64FX
+    if (Impl::is_zero_byte(value)) {
+      uint64_t kpID = 0;
+      if (Kokkos::Profiling::profileLibraryLoaded()) {
+        // We are not really using parallel_for here but using beginParallelFor
+        // instead of begin_parallel_for (and adding "via memset") is the best
+        // we can do to indicate that this is not supposed to be tunable (and
+        // doesn't really execute a parallel_for).
+        Kokkos::Profiling::beginParallelFor(
+            "Kokkos::View::initialization [" + name + "] via memset",
+            Kokkos::Profiling::Experimental::device_id(space), &kpID);
+      }
+      (void)ZeroMemset<ExecSpace, ValueType*, typename DeviceType::memory_space,
+                       Kokkos::MemoryTraits<Kokkos::Unmanaged>>(
+          space,
+          Kokkos::View<ValueType*, typename DeviceType::memory_space,
+                       Kokkos::MemoryTraits<Kokkos::Unmanaged>>(ptr, n),
+          value);
+
+      if (Kokkos::Profiling::profileLibraryLoaded()) {
+        Kokkos::Profiling::endParallelFor(kpID);
+      }
+      if (default_exec_space)
+        space.fence("Kokkos::Impl::ViewValueFunctor: View init/destroy fence");
+    } else {
+#endif
+      parallel_for_implementation(false);
+#ifndef KOKKOS_ARCH_A64FX
+    }
+#endif
+  }
+
+  template <typename Dummy = ValueType>
+  std::enable_if_t<!(std::is_trivial<Dummy>::value &&
+                     std::is_trivially_copy_assignable<ValueType>::value)>
+  construct_dispatch() {
+    parallel_for_implementation(false);
+  }
+
+  void parallel_for_implementation(bool arg) {
+    destroy = arg;
+    if (!space.in_parallel()) {
+      PolicyType policy(0, n);
+      std::string functor_name;
+      uint64_t kpID = 0;
+      if (Kokkos::Profiling::profileLibraryLoaded()) {
+        functor_name =
+            (destroy ? "Kokkos::View::destruction [" + name + "]"
+                     : "Kokkos::View::initialization [" + name + "]");
+        Kokkos::Profiling::beginParallelFor(
+            functor_name, Kokkos::Profiling::Experimental::device_id(space),
+            &kpID);
+      }
+
+#ifdef KOKKOS_ENABLE_CUDA
+      if (std::is_same<ExecSpace, Kokkos::Cuda>::value) {
+        Kokkos::Impl::cuda_prefetch_pointer(space, ptr, sizeof(ValueType) * n,
+                                            true);
+      }
+#endif
+      const Kokkos::Impl::ParallelFor<ViewValueFunctor, PolicyType> closure(
+          *this, policy);
+      closure.execute();
+      if (default_exec_space || destroy)
+        space.fence("Kokkos::Impl::ViewValueFunctor: View init/destroy fence");
+      if (Kokkos::Profiling::profileLibraryLoaded()) {
+        Kokkos::Profiling::endParallelFor(kpID);
+      }
+    } else {
+      for (size_t i = 0; i < n; ++i) operator()(i);
+    }
+  }
+
+  void construct_shared_allocation() { construct_dispatch(); }
+
+  void destroy_shared_allocation() { parallel_for_implementation(true); }
+};
+
+template <class DeviceType, class ValueType>
+struct ViewValueFunctor<DeviceType, ValueType, true /* is_scalar */> {
+  using ExecSpace  = typename DeviceType::execution_space;
+  using PolicyType = Kokkos::RangePolicy<ExecSpace, Kokkos::IndexType<int64_t>>;
+
+  ExecSpace space;
+  ValueType* ptr;
+  size_t n;
+  std::string name;
+  bool default_exec_space;
+
+  KOKKOS_INLINE_FUNCTION
+  void operator()(const size_t i) const { ptr[i] = ValueType(); }
+
+  ViewValueFunctor()                        = default;
+  ViewValueFunctor(const ViewValueFunctor&) = default;
+  ViewValueFunctor& operator=(const ViewValueFunctor&) = default;
+
+  ViewValueFunctor(ExecSpace const& arg_space, ValueType* const arg_ptr,
+                   size_t const arg_n, std::string arg_name)
+      : space(arg_space),
+        ptr(arg_ptr),
+        n(arg_n),
+        name(std::move(arg_name)),
+        default_exec_space(false) {}
+
+  ViewValueFunctor(ValueType* const arg_ptr, size_t const arg_n,
+                   std::string arg_name)
+      : space(ExecSpace{}),
+        ptr(arg_ptr),
+        n(arg_n),
+        name(std::move(arg_name)),
+        default_exec_space(true) {}
+
+  template <typename Dummy = ValueType>
+  std::enable_if_t<std::is_trivial<Dummy>::value &&
+                   std::is_trivially_copy_assignable<Dummy>::value>
+  construct_shared_allocation() {
+    // Shortcut for zero initialization
+    ValueType value{};
+// On A64FX, memset seems to do the wrong thing with regard to first touch,
+// leading to significant performance issues
+#ifndef KOKKOS_ARCH_A64FX
+    if (Impl::is_zero_byte(value)) {
+      uint64_t kpID = 0;
+      if (Kokkos::Profiling::profileLibraryLoaded()) {
+        // We are not really using parallel_for here but using beginParallelFor
+        // instead of begin_parallel_for (and adding "via memset") is the best
+        // we can do to indicate that this is not supposed to be tunable (and
+        // doesn't really execute a parallel_for).
+        Kokkos::Profiling::beginParallelFor(
+            "Kokkos::View::initialization [" + name + "] via memset",
+            Kokkos::Profiling::Experimental::device_id(space), &kpID);
+      }
+
+      (void)ZeroMemset<ExecSpace, ValueType*, typename DeviceType::memory_space,
+                       Kokkos::MemoryTraits<Kokkos::Unmanaged>>(
+          space,
+          Kokkos::View<ValueType*, typename DeviceType::memory_space,
+                       Kokkos::MemoryTraits<Kokkos::Unmanaged>>(ptr, n),
+          value);
+
+      if (Kokkos::Profiling::profileLibraryLoaded()) {
+        Kokkos::Profiling::endParallelFor(kpID);
+      }
+      if (default_exec_space)
+        space.fence("Kokkos::Impl::ViewValueFunctor: View init/destroy fence");
+    } else {
+#endif
+      parallel_for_implementation();
+#ifndef KOKKOS_ARCH_A64FX
+    }
+#endif
+  }
+
+  template <typename Dummy = ValueType>
+  std::enable_if_t<!(std::is_trivial<Dummy>::value &&
+                     std::is_trivially_copy_assignable<Dummy>::value)>
+  construct_shared_allocation() {
+    parallel_for_implementation();
+  }
+
+  void parallel_for_implementation() {
+    if (!space.in_parallel()) {
+      PolicyType policy(0, n);
+      std::string functor_name = "Kokkos::View::initialization [" + name + "]";
+      uint64_t kpID            = 0;
+      if (Kokkos::Profiling::profileLibraryLoaded()) {
+        Kokkos::Profiling::beginParallelFor(
+            functor_name, Kokkos::Profiling::Experimental::device_id(space),
+            &kpID);
+      }
+#ifdef KOKKOS_ENABLE_CUDA
+      if (std::is_same<ExecSpace, Kokkos::Cuda>::value) {
+        Kokkos::Impl::cuda_prefetch_pointer(space, ptr, sizeof(ValueType) * n,
+                                            true);
+      }
+#endif
+      const Kokkos::Impl::ParallelFor<ViewValueFunctor, PolicyType> closure(
+          *this, policy);
+      closure.execute();
+      if (default_exec_space)
+        space.fence(
+            "Kokkos::Impl::ViewValueFunctor: Fence after setting values in "
+            "view");
+      if (Kokkos::Profiling::profileLibraryLoaded()) {
+        Kokkos::Profiling::endParallelFor(kpID);
+      }
+    } else {
+      for (size_t i = 0; i < n; ++i) operator()(i);
+    }
+  }
+
+  void destroy_shared_allocation() {}
+};
+
+//----------------------------------------------------------------------------
+/** \brief  View mapping for non-specialized data type and standard layout */
+template <class Traits>
+class ViewMapping<
+    Traits,
+    std::enable_if_t<(
+        std::is_void<typename Traits::specialize>::value &&
+        ViewOffset<typename Traits::dimension, typename Traits::array_layout,
+                   void>::is_mapping_plugin::value)>> {
+ public:
+  using offset_type = ViewOffset<typename Traits::dimension,
+                                 typename Traits::array_layout, void>;
+
+  using handle_type = typename ViewDataHandle<Traits>::handle_type;
+
+  handle_type m_impl_handle;
+  offset_type m_impl_offset;
+
+ private:
+  template <class, class...>
+  friend class ViewMapping;
+
+  KOKKOS_INLINE_FUNCTION
+  ViewMapping(const handle_type& arg_handle, const offset_type& arg_offset)
+      : m_impl_handle(arg_handle), m_impl_offset(arg_offset) {}
+
+ public:
+  using printable_label_typedef = void;
+  enum { is_managed = Traits::is_managed };
+
+  //----------------------------------------
+  // Domain dimensions
+
+  enum { Rank = Traits::dimension::rank };
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION constexpr size_t extent(const iType& r) const {
+    return m_impl_offset.m_dim.extent(r);
+  }
+
+  static KOKKOS_INLINE_FUNCTION constexpr size_t static_extent(
+      const unsigned r) noexcept {
+    using dim_type = typename offset_type::dimension_type;
+    return dim_type::static_extent(r);
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr typename Traits::array_layout layout()
+      const {
+    return m_impl_offset.layout();
+  }
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_0() const {
+    return m_impl_offset.dimension_0();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_1() const {
+    return m_impl_offset.dimension_1();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_2() const {
+    return m_impl_offset.dimension_2();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_3() const {
+    return m_impl_offset.dimension_3();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_4() const {
+    return m_impl_offset.dimension_4();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_5() const {
+    return m_impl_offset.dimension_5();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_6() const {
+    return m_impl_offset.dimension_6();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t dimension_7() const {
+    return m_impl_offset.dimension_7();
+  }
+
+  // Is a regular layout with uniform striding for each index.
+  using is_regular = typename offset_type::is_regular;
+
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_0() const {
+    return m_impl_offset.stride_0();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_1() const {
+    return m_impl_offset.stride_1();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_2() const {
+    return m_impl_offset.stride_2();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_3() const {
+    return m_impl_offset.stride_3();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_4() const {
+    return m_impl_offset.stride_4();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_5() const {
+    return m_impl_offset.stride_5();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_6() const {
+    return m_impl_offset.stride_6();
+  }
+  KOKKOS_INLINE_FUNCTION constexpr size_t stride_7() const {
+    return m_impl_offset.stride_7();
+  }
+
+  template <typename iType>
+  KOKKOS_INLINE_FUNCTION void stride(iType* const s) const {
+    m_impl_offset.stride(s);
+  }
+
+  //----------------------------------------
+  // Range span
+
+  /** \brief  Span of the mapped range */
+  KOKKOS_INLINE_FUNCTION constexpr size_t span() const {
+    return m_impl_offset.span();
+  }
+
+  /** \brief  Is the mapped range span contiguous */
+  KOKKOS_INLINE_FUNCTION constexpr bool span_is_contiguous() const {
+    return m_impl_offset.span_is_contiguous();
+  }
+
+  using reference_type = typename ViewDataHandle<Traits>::return_type;
+  using pointer_type   = typename Traits::value_type*;
+
+  /** \brief  Query raw pointer to memory */
+  KOKKOS_INLINE_FUNCTION constexpr pointer_type data() const {
+    return m_impl_handle;
+  }
+
+  //----------------------------------------
+  // The View class performs all rank and bounds checking before
+  // calling these element reference methods.
+
+  KOKKOS_FORCEINLINE_FUNCTION
+  reference_type reference() const { return m_impl_handle[0]; }
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(std::is_integral<I0>::value &&
+                        // if layout is neither stride nor irregular,
+                        // then just use the handle directly
+                        !(std::is_same<typename Traits::array_layout,
+                                       Kokkos::LayoutStride>::value ||
+                          !is_regular::value)),
+                       reference_type>
+      reference(const I0& i0) const {
+    return m_impl_handle[i0];
+  }
+
+  template <typename I0>
+  KOKKOS_FORCEINLINE_FUNCTION
+      std::enable_if_t<(std::is_integral<I0>::value &&
+                        // if the layout is strided or irregular, then
+                        // we have to use the offset
+                        (std::is_same<typename Traits::array_layout,
+                                      Kokkos::LayoutStride>::value ||
+                         !is_regular::value)),
+                       reference_type>
+      reference(const I0& i0) const {
+    return m_impl_handle[m_impl_offset(i0)];
+  }
+
+  template <typename I0, typename I1>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type reference(const I0& i0,
+                                                       const I1& i1) const {
+    return m_impl_handle[m_impl_offset(i0, i1)];
+  }
+
+  template <typename I0, typename I1, typename I2>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type reference(const I0& i0,
+                                                       const I1& i1,
+                                                       const I2& i2) const {
+    return m_impl_handle[m_impl_offset(i0, i1, i2)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type
+  reference(const I0& i0, const I1& i1, const I2& i2, const I3& i3) const {
+    return m_impl_handle[m_impl_offset(i0, i1, i2, i3)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type reference(const I0& i0,
+                                                       const I1& i1,
+                                                       const I2& i2,
+                                                       const I3& i3,
+                                                       const I4& i4) const {
+    return m_impl_handle[m_impl_offset(i0, i1, i2, i3, i4)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type
+  reference(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
+            const I4& i4, const I5& i5) const {
+    return m_impl_handle[m_impl_offset(i0, i1, i2, i3, i4, i5)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type
+  reference(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
+            const I4& i4, const I5& i5, const I6& i6) const {
+    return m_impl_handle[m_impl_offset(i0, i1, i2, i3, i4, i5, i6)];
+  }
+
+  template <typename I0, typename I1, typename I2, typename I3, typename I4,
+            typename I5, typename I6, typename I7>
+  KOKKOS_FORCEINLINE_FUNCTION reference_type
+  reference(const I0& i0, const I1& i1, const I2& i2, const I3& i3,
+            const I4& i4, const I5& i5, const I6& i6, const I7& i7) const {
+    return m_impl_handle[m_impl_offset(i0, i1, i2, i3, i4, i5, i6, i7)];
+  }
+
+  //----------------------------------------
+
+ private:
+  enum { MemorySpanMask = 8 - 1 /* Force alignment on 8 byte boundary */ };
+  enum { MemorySpanSize = sizeof(typename Traits::value_type) };
+
+ public:
+  /** \brief  Span, in bytes, of the referenced memory */
+  KOKKOS_INLINE_FUNCTION constexpr size_t memory_span() const {
+    return (m_impl_offset.span() * sizeof(typename Traits::value_type) +
+            MemorySpanMask) &
+           ~size_t(MemorySpanMask);
+  }
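+
+  // For instance (a sketch): a span of 13 one-byte elements reports
+  //   (13 * 1 + 7) & ~size_t(7) == 16
+  // bytes, i.e. the byte span rounded up to the 8-byte boundary.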
+
+  //----------------------------------------
+
+  KOKKOS_DEFAULTED_FUNCTION ~ViewMapping() = default;
+  KOKKOS_INLINE_FUNCTION ViewMapping() : m_impl_handle(), m_impl_offset() {}
+
+  KOKKOS_DEFAULTED_FUNCTION ViewMapping(const ViewMapping&) = default;
+  KOKKOS_DEFAULTED_FUNCTION ViewMapping& operator=(const ViewMapping&) =
+      default;
+
+  KOKKOS_DEFAULTED_FUNCTION ViewMapping(ViewMapping&&) = default;
+  KOKKOS_DEFAULTED_FUNCTION ViewMapping& operator=(ViewMapping&&) = default;
+
+  //----------------------------------------
+
+  /**\brief  Span, in bytes, of the required memory */
+  KOKKOS_INLINE_FUNCTION
+  static constexpr size_t memory_span(
+      typename Traits::array_layout const& arg_layout) {
+    using padding = std::integral_constant<unsigned int, 0>;
+    return (offset_type(padding(), arg_layout).span() * MemorySpanSize +
+            MemorySpanMask) &
+           ~size_t(MemorySpanMask);
+  }
+
+  /**\brief  Wrap a span of memory */
+  template <class... P>
+  KOKKOS_INLINE_FUNCTION ViewMapping(
+      Kokkos::Impl::ViewCtorProp<P...> const& arg_prop,
+      typename Traits::array_layout const& arg_layout)
+      : m_impl_handle(
+            ((Kokkos::Impl::ViewCtorProp<void, pointer_type> const&)arg_prop)
+                .value),
+        m_impl_offset(std::integral_constant<unsigned, 0>(), arg_layout) {}
+
+  /**\brief  Assign data */
+  KOKKOS_INLINE_FUNCTION
+  void assign_data(pointer_type arg_ptr) {
+    m_impl_handle = handle_type(arg_ptr);
+  }
+
+  //----------------------------------------
+  /*  Allocate and construct mapped array.
+   *  Allocate via shared allocation record and
+   *  return that record for allocation tracking.
+   */
+  template <class... P>
+  Kokkos::Impl::SharedAllocationRecord<>* allocate_shared(
+      Kokkos::Impl::ViewCtorProp<P...> const& arg_prop,
+      typename Traits::array_layout const& arg_layout,
+      bool execution_space_specified) {
+    using alloc_prop = Kokkos::Impl::ViewCtorProp<P...>;
+
+    using execution_space = typename alloc_prop::execution_space;
+    using memory_space    = typename Traits::memory_space;
+    using value_type      = typename Traits::value_type;
+    using functor_type =
+        ViewValueFunctor<Kokkos::Device<execution_space, memory_space>,
+                         value_type>;
+    using record_type =
+        Kokkos::Impl::SharedAllocationRecord<memory_space, functor_type>;
+
+    // Query the mapping for byte-size of allocation.
+    // If padding is allowed then pass in sizeof value type
+    // for padding computation.
+    using padding = std::integral_constant<
+        unsigned int, alloc_prop::allow_padding ? sizeof(value_type) : 0>;
+
+    m_impl_offset = offset_type(padding(), arg_layout);
+
+    const size_t alloc_size =
+        (m_impl_offset.span() * MemorySpanSize + MemorySpanMask) &
+        ~size_t(MemorySpanMask);
+    const std::string& alloc_name =
+        static_cast<Kokkos::Impl::ViewCtorProp<void, std::string> const&>(
+            arg_prop)
+            .value;
+    const execution_space& exec_space =
+        static_cast<Kokkos::Impl::ViewCtorProp<void, execution_space> const&>(
+            arg_prop)
+            .value;
+    const memory_space& mem_space =
+        static_cast<Kokkos::Impl::ViewCtorProp<void, memory_space> const&>(
+            arg_prop)
+            .value;
+
+    // Create the shared memory tracking record and allocate memory from the
+    // memory space
+    record_type* const record =
+        execution_space_specified
+            ? record_type::allocate(exec_space, mem_space, alloc_name,
+                                    alloc_size)
+            : record_type::allocate(mem_space, alloc_name, alloc_size);
+
+    m_impl_handle = handle_type(reinterpret_cast<pointer_type>(record->data()));
+
+    //  Only initialize if the allocation is non-zero.
+    //  May be zero if one of the dimensions is zero.
+    if (alloc_size && alloc_prop::initialize) {
+      // Assume destruction is only required when construction is requested.
+      // The ViewValueFunctor has both value construction and destruction
+      // operators.
+      record->m_destroy =
+          execution_space_specified
+              ? functor_type(exec_space, (value_type*)m_impl_handle,
+                             m_impl_offset.span(), alloc_name)
+              : functor_type((value_type*)m_impl_handle, m_impl_offset.span(),
+                             alloc_name);
+
+      // Construct values
+      record->m_destroy.construct_shared_allocation();
+    }
+
+    return record;
+  }
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+/** \brief  Assign compatible default mappings */
+
+template <class DstTraits, class SrcTraits>
+class ViewMapping<
+    DstTraits, SrcTraits,
+    std::enable_if_t<(
+        !(std::is_same<typename SrcTraits::array_layout, LayoutStride>::
+              value) &&  // Added to have a new specialization for SrcType of
+                         // LayoutStride
+        // default mappings
+        std::is_void<typename DstTraits::specialize>::value &&
+        std::is_void<typename SrcTraits::specialize>::value &&
+        (
+            // same layout
+            std::is_same<typename DstTraits::array_layout,
+                         typename SrcTraits::array_layout>::value ||
+            // known layout
+            ((std::is_same<typename DstTraits::array_layout,
+                           Kokkos::LayoutLeft>::value ||
+              std::is_same<typename DstTraits::array_layout,
+                           Kokkos::LayoutRight>::value ||
+              std::is_same<typename DstTraits::array_layout,
+                           Kokkos::LayoutStride>::value) &&
+             (std::is_same<typename SrcTraits::array_layout,
+                           Kokkos::LayoutLeft>::value ||
+              std::is_same<typename SrcTraits::array_layout,
+                           Kokkos::LayoutRight>::value ||
+              std::is_same<typename SrcTraits::array_layout,
+                           Kokkos::LayoutStride>::value))))>> {
+ private:
+  enum {
+    is_assignable_space = Kokkos::Impl::MemorySpaceAccess<
+        typename DstTraits::memory_space,
+        typename SrcTraits::memory_space>::assignable
+  };
+
+  enum {
+    is_assignable_value_type =
+        std::is_same<typename DstTraits::value_type,
+                     typename SrcTraits::value_type>::value ||
+        std::is_same<typename DstTraits::value_type,
+                     typename SrcTraits::const_value_type>::value
+  };
+
+  enum {
+    is_assignable_dimension =
+        ViewDimensionAssignable<typename DstTraits::dimension,
+                                typename SrcTraits::dimension>::value
+  };
+
+  enum {
+    is_assignable_layout =
+        std::is_same<typename DstTraits::array_layout,
+                     typename SrcTraits::array_layout>::value ||
+        std::is_same<typename DstTraits::array_layout,
+                     Kokkos::LayoutStride>::value ||
+        (DstTraits::dimension::rank == 0) ||
+        (DstTraits::dimension::rank == 1 &&
+         DstTraits::dimension::rank_dynamic == 1)
+  };
+
+ public:
+  enum {
+    is_assignable_data_type =
+        is_assignable_value_type && is_assignable_dimension
+  };
+  enum {
+    is_assignable = is_assignable_space && is_assignable_value_type &&
+                    is_assignable_dimension && is_assignable_layout
+  };
+
+  using TrackType = Kokkos::Impl::SharedAllocationTracker;
+  using DstType   = ViewMapping<DstTraits, void>;
+  using SrcType   = ViewMapping<SrcTraits, void>;
+
+  KOKKOS_INLINE_FUNCTION
+  static void assign(DstType& dst, const SrcType& src,
+                     const TrackType& src_track) {
+    static_assert(is_assignable_space,
+                  "View assignment must have compatible spaces");
+
+    static_assert(
+        is_assignable_value_type,
+        "View assignment must have same value type or const = non-const");
+
+    static_assert(is_assignable_dimension,
+                  "View assignment must have compatible dimensions");
+
+    static_assert(
+        is_assignable_layout,
+        "View assignment must have compatible layout or have rank <= 1");
+
+    using dst_offset_type = typename DstType::offset_type;
+
+    if (size_t(DstTraits::dimension::rank_dynamic) <
+        size_t(SrcTraits::dimension::rank_dynamic)) {
+      using dst_dim   = typename DstTraits::dimension;
+      bool assignable = ((1 > DstTraits::dimension::rank_dynamic &&
+                          1 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN0 == src.dimension_0()
+                             : true) &&
+                        ((2 > DstTraits::dimension::rank_dynamic &&
+                          2 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN1 == src.dimension_1()
+                             : true) &&
+                        ((3 > DstTraits::dimension::rank_dynamic &&
+                          3 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN2 == src.dimension_2()
+                             : true) &&
+                        ((4 > DstTraits::dimension::rank_dynamic &&
+                          4 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN3 == src.dimension_3()
+                             : true) &&
+                        ((5 > DstTraits::dimension::rank_dynamic &&
+                          5 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN4 == src.dimension_4()
+                             : true) &&
+                        ((6 > DstTraits::dimension::rank_dynamic &&
+                          6 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN5 == src.dimension_5()
+                             : true) &&
+                        ((7 > DstTraits::dimension::rank_dynamic &&
+                          7 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN6 == src.dimension_6()
+                             : true) &&
+                        ((8 > DstTraits::dimension::rank_dynamic &&
+                          8 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN7 == src.dimension_7()
+                             : true);
+      if (!assignable)
+        Kokkos::abort(
+            "View Assignment: trying to assign runtime dimension to non "
+            "matching compile time dimension.");
+    }
+    dst.m_impl_offset = dst_offset_type(src.m_impl_offset);
+    dst.m_impl_handle = Kokkos::Impl::ViewDataHandle<DstTraits>::assign(
+        src.m_impl_handle, src_track);
+  }
+};
+
+//----------------------------------------------------------------------------
+// Create new specialization for SrcType of LayoutStride. Runtime check for
+// compatible layout
+template <class DstTraits, class SrcTraits>
+class ViewMapping<
+    DstTraits, SrcTraits,
+    std::enable_if_t<(
+        std::is_same<typename SrcTraits::array_layout,
+                     Kokkos::LayoutStride>::value &&
+        std::is_void<typename DstTraits::specialize>::value &&
+        std::is_void<typename SrcTraits::specialize>::value &&
+        (
+            // same layout
+            std::is_same<typename DstTraits::array_layout,
+                         typename SrcTraits::array_layout>::value ||
+            // known layout
+            (std::is_same<typename DstTraits::array_layout,
+                          Kokkos::LayoutLeft>::value ||
+             std::is_same<typename DstTraits::array_layout,
+                          Kokkos::LayoutRight>::value ||
+             std::is_same<typename DstTraits::array_layout,
+                          Kokkos::LayoutStride>::value)))>> {
+ private:
+  enum {
+    is_assignable_space = Kokkos::Impl::MemorySpaceAccess<
+        typename DstTraits::memory_space,
+        typename SrcTraits::memory_space>::assignable
+  };
+
+  enum {
+    is_assignable_value_type =
+        std::is_same<typename DstTraits::value_type,
+                     typename SrcTraits::value_type>::value ||
+        std::is_same<typename DstTraits::value_type,
+                     typename SrcTraits::const_value_type>::value
+  };
+
+  enum {
+    is_assignable_dimension =
+        ViewDimensionAssignable<typename DstTraits::dimension,
+                                typename SrcTraits::dimension>::value
+  };
+
+ public:
+  enum {
+    is_assignable_data_type =
+        is_assignable_value_type && is_assignable_dimension
+  };
+  enum {
+    is_assignable = is_assignable_space && is_assignable_value_type &&
+                    is_assignable_dimension
+  };
+
+  using TrackType = Kokkos::Impl::SharedAllocationTracker;
+  using DstType   = ViewMapping<DstTraits, void>;
+  using SrcType   = ViewMapping<SrcTraits, void>;
+
+  KOKKOS_INLINE_FUNCTION
+  static bool assignable_layout_check(DstType&,
+                                      const SrcType& src)  // Runtime check
+  {
+    size_t strides[9];
+    bool assignable = true;
+    src.stride(strides);
+    size_t exp_stride = 1;
+    if (std::is_same<typename DstTraits::array_layout,
+                     Kokkos::LayoutLeft>::value) {
+      for (int i = 0; i < src.Rank; i++) {
+        if (i > 0) exp_stride *= src.extent(i - 1);
+        if (strides[i] != exp_stride) {
+          assignable = false;
+          break;
+        }
+      }
+    } else if (std::is_same<typename DstTraits::array_layout,
+                            Kokkos::LayoutRight>::value) {
+      for (int i = src.Rank - 1; i >= 0; i--) {
+        if (i < src.Rank - 1) exp_stride *= src.extent(i + 1);
+        if (strides[i] != exp_stride) {
+          assignable = false;
+          break;
+        }
+      }
+    }
+    return assignable;
+  }
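+
+  // For instance (a sketch): a rank-2 LayoutStride source with extents
+  // {4, 5} passes the LayoutLeft check only when strides == {1, 4}, and
+  // the LayoutRight check only when strides == {5, 1}.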
+
+  KOKKOS_INLINE_FUNCTION
+  static void assign(DstType& dst, const SrcType& src,
+                     const TrackType& src_track) {
+    static_assert(is_assignable_space,
+                  "View assignment must have compatible spaces");
+
+    static_assert(
+        is_assignable_value_type,
+        "View assignment must have same value type or const = non-const");
+
+    static_assert(is_assignable_dimension,
+                  "View assignment must have compatible dimensions");
+
+    bool assignable_layout = assignable_layout_check(dst, src);  // Runtime
+                                                                 // check
+    if (!assignable_layout)
+      Kokkos::abort("View assignment must have compatible layouts\n");
+
+    using dst_offset_type = typename DstType::offset_type;
+
+    if (size_t(DstTraits::dimension::rank_dynamic) <
+        size_t(SrcTraits::dimension::rank_dynamic)) {
+      using dst_dim   = typename DstTraits::dimension;
+      bool assignable = ((1 > DstTraits::dimension::rank_dynamic &&
+                          1 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN0 == src.dimension_0()
+                             : true) &&
+                        ((2 > DstTraits::dimension::rank_dynamic &&
+                          2 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN1 == src.dimension_1()
+                             : true) &&
+                        ((3 > DstTraits::dimension::rank_dynamic &&
+                          3 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN2 == src.dimension_2()
+                             : true) &&
+                        ((4 > DstTraits::dimension::rank_dynamic &&
+                          4 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN3 == src.dimension_3()
+                             : true) &&
+                        ((5 > DstTraits::dimension::rank_dynamic &&
+                          5 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN4 == src.dimension_4()
+                             : true) &&
+                        ((6 > DstTraits::dimension::rank_dynamic &&
+                          6 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN5 == src.dimension_5()
+                             : true) &&
+                        ((7 > DstTraits::dimension::rank_dynamic &&
+                          7 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN6 == src.dimension_6()
+                             : true) &&
+                        ((8 > DstTraits::dimension::rank_dynamic &&
+                          8 <= SrcTraits::dimension::rank_dynamic)
+                             ? dst_dim::ArgN7 == src.dimension_7()
+                             : true);
+      if (!assignable)
+        Kokkos::abort(
+            "View Assignment: trying to assign runtime dimension to non "
+            "matching compile time dimension.");
+    }
+    dst.m_impl_offset = dst_offset_type(src.m_impl_offset);
+    dst.m_impl_handle = Kokkos::Impl::ViewDataHandle<DstTraits>::assign(
+        src.m_impl_handle, src_track);
+  }
+};
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+// Subview mapping.
+// Deduce destination view type from source view traits and subview arguments
+
+template <class, class ValueType, class Exts, class... Args>
+struct SubViewDataTypeImpl;
+
+/* base case */
+template <class ValueType>
+struct SubViewDataTypeImpl<void, ValueType, Kokkos::Experimental::Extents<>> {
+  using type = ValueType;
+};
+
+/* for integral args, subview doesn't have that dimension */
+template <class ValueType, ptrdiff_t Ext, ptrdiff_t... Exts, class Integral,
+          class... Args>
+struct SubViewDataTypeImpl<
+    std::enable_if_t<std::is_integral<std::decay_t<Integral>>::value>,
+    ValueType, Kokkos::Experimental::Extents<Ext, Exts...>, Integral, Args...>
+    : SubViewDataTypeImpl<void, ValueType,
+                          Kokkos::Experimental::Extents<Exts...>, Args...> {};
+
+/* for ALL slice, subview has the same dimension */
+template <class ValueType, ptrdiff_t Ext, ptrdiff_t... Exts, class... Args>
+struct SubViewDataTypeImpl<void, ValueType,
+                           Kokkos::Experimental::Extents<Ext, Exts...>, ALL_t,
+                           Args...>
+    : SubViewDataTypeImpl<void, typename ApplyExtent<ValueType, Ext>::type,
+                          Kokkos::Experimental::Extents<Exts...>, Args...> {};
+
+/* for pair-style slice, subview has dynamic dimension, since pair doesn't give
+ * static sizes */
+/* Since we don't allow interleaving of dynamic and static extents, make all of
+ * the dimensions to the left dynamic  */
+template <class ValueType, ptrdiff_t Ext, ptrdiff_t... Exts, class PairLike,
+          class... Args>
+struct SubViewDataTypeImpl<
+    std::enable_if_t<is_pair_like<PairLike>::value>, ValueType,
+    Kokkos::Experimental::Extents<Ext, Exts...>, PairLike, Args...>
+    : SubViewDataTypeImpl<
+          void, typename make_all_extents_into_pointers<ValueType>::type*,
+          Kokkos::Experimental::Extents<Exts...>, Args...> {};
+
+template <class ValueType, class Exts, class... Args>
+struct SubViewDataType : SubViewDataTypeImpl<void, ValueType, Exts, Args...> {};
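+
+// For example (a sketch): for value_type double, extents <dynamic, 5, 9>
+// (i.e. a View<double* [5][9]>) and arguments (ALL, pair, 7):
+//   ALL   keeps the dynamic extent                        -> double*
+//   pair  keeps [5], but as a dynamic extent, and turns
+//         every extent to its left into a pointer as well -> double**
+//   7     drops the last dimension                        -> double**
+// so the deduced subview data type is double**.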
+
+//----------------------------------------------------------------------------
+
+template <class SrcTraits, class... Args>
+class ViewMapping<
+    std::enable_if_t<(std::is_void<typename SrcTraits::specialize>::value &&
+                      (std::is_same<typename SrcTraits::array_layout,
+                                    Kokkos::LayoutLeft>::value ||
+                       std::is_same<typename SrcTraits::array_layout,
+                                    Kokkos::LayoutRight>::value ||
+                       std::is_same<typename SrcTraits::array_layout,
+                                    Kokkos::LayoutStride>::value))>,
+    SrcTraits, Args...> {
+ private:
+  static_assert(SrcTraits::rank == sizeof...(Args),
+                "Subview mapping requires one argument for each dimension of "
+                "source View");
+
+  enum {
+    RZ = false,
+    R0 = bool(is_integral_extent<0, Args...>::value),
+    R1 = bool(is_integral_extent<1, Args...>::value),
+    R2 = bool(is_integral_extent<2, Args...>::value),
+    R3 = bool(is_integral_extent<3, Args...>::value),
+    R4 = bool(is_integral_extent<4, Args...>::value),
+    R5 = bool(is_integral_extent<5, Args...>::value),
+    R6 = bool(is_integral_extent<6, Args...>::value),
+    R7 = bool(is_integral_extent<7, Args...>::value)
+  };
+
+  enum {
+    rank = unsigned(R0) + unsigned(R1) + unsigned(R2) + unsigned(R3) +
+           unsigned(R4) + unsigned(R5) + unsigned(R6) + unsigned(R7)
+  };
+
+  // Whether right-most rank is a range.
+  enum {
+    R0_rev =
+        (0 == SrcTraits::rank
+             ? RZ
+             : (1 == SrcTraits::rank
+                    ? R0
+                    : (2 == SrcTraits::rank
+                           ? R1
+                           : (3 == SrcTraits::rank
+                                  ? R2
+                                  : (4 == SrcTraits::rank
+                                         ? R3
+                                         : (5 == SrcTraits::rank
+                                                ? R4
+                                                : (6 == SrcTraits::rank
+                                                       ? R5
+                                                       : (7 == SrcTraits::rank
+                                                              ? R6
+                                                              : R7))))))))
+  };
+
+  // Subview's layout
+  using array_layout = std::conditional_t<
+      (            /* Same array layout IF */
+       (rank == 0) /* output rank zero */
+       || SubviewLegalArgsCompileTime<typename SrcTraits::array_layout,
+                                      typename SrcTraits::array_layout, rank,
+                                      SrcTraits::rank, 0, Args...>::value ||
+       // OutputRank 1 or 2, InputLayout Left, Interval 0
+       // because the single index has stride one, or only the second index
+       // carries a stride.
+       (rank <= 2 && R0 &&
+        std::is_same<typename SrcTraits::array_layout,
+                     Kokkos::LayoutLeft>::value)  // replace with input rank
+       ||
+       // OutputRank 1 or 2, InputLayout Right, Interval [InputRank-1]
+       // because the single index has stride one, or only the second index
+       // carries a stride.
+       (rank <= 2 && R0_rev &&
+        std::is_same<typename SrcTraits::array_layout,
+                     Kokkos::LayoutRight>::value)  // replace input rank
+       ),
+      typename SrcTraits::array_layout, Kokkos::LayoutStride>;
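+
+  // For instance (a sketch): subview(v, ALL, 3) of a rank-2 LayoutLeft view
+  // keeps LayoutLeft (the stride-one leftmost index survives), whereas
+  // subview(v, 3, ALL) leaves a strided index and therefore falls back to
+  // LayoutStride.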
+
+  using value_type = typename SrcTraits::value_type;
+
+  using data_type =
+      typename SubViewDataType<value_type,
+                               typename Kokkos::Impl::ParseViewExtents<
+                                   typename SrcTraits::data_type>::type,
+                               Args...>::type;
+
+ public:
+  using traits_type = Kokkos::ViewTraits<data_type, array_layout,
+                                         typename SrcTraits::device_type,
+                                         typename SrcTraits::memory_traits>;
+
+  using type =
+      Kokkos::View<data_type, array_layout, typename SrcTraits::device_type,
+                   typename SrcTraits::memory_traits>;
+
+  template <class MemoryTraits>
+  struct apply {
+    static_assert(Kokkos::is_memory_traits<MemoryTraits>::value, "");
+
+    using traits_type =
+        Kokkos::ViewTraits<data_type, array_layout,
+                           typename SrcTraits::device_type, MemoryTraits>;
+
+    using type = Kokkos::View<data_type, array_layout,
+                              typename SrcTraits::device_type, MemoryTraits>;
+  };
+
+  // The presumed type is 'ViewMapping< traits_type , void >'
+  // However, a compatible ViewMapping is acceptable.
+  template <class DstTraits>
+  KOKKOS_INLINE_FUNCTION static void assign(
+      ViewMapping<DstTraits, void>& dst,
+      ViewMapping<SrcTraits, void> const& src, Args... args) {
+    static_assert(ViewMapping<DstTraits, traits_type, void>::is_assignable,
+                  "Subview destination type must be compatible with subview "
+                  "derived type");
+
+    using DstType = ViewMapping<DstTraits, void>;
+
+    using dst_offset_type = typename DstType::offset_type;
+
+    const SubviewExtents<SrcTraits::rank, rank> extents(src.m_impl_offset.m_dim,
+                                                        args...);
+
+    dst.m_impl_offset = dst_offset_type(src.m_impl_offset, extents);
+
+    dst.m_impl_handle = ViewDataHandle<DstTraits>::assign(
+        src.m_impl_handle,
+        src.m_impl_offset(extents.domain_offset(0), extents.domain_offset(1),
+                          extents.domain_offset(2), extents.domain_offset(3),
+                          extents.domain_offset(4), extents.domain_offset(5),
+                          extents.domain_offset(6), extents.domain_offset(7)));
+  }
+};
+
+//----------------------------------------------------------------------------
+
+}  // namespace Impl
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+namespace Kokkos {
+namespace Impl {
+
+template <unsigned, class MapType>
+KOKKOS_INLINE_FUNCTION bool view_verify_operator_bounds(const MapType&) {
+  return true;
+}
+
+template <unsigned R, class MapType, class iType, class... Args>
+KOKKOS_INLINE_FUNCTION bool view_verify_operator_bounds(const MapType& map,
+                                                        const iType& i,
+                                                        Args... args) {
+  return (size_t(i) < map.extent(R)) &&
+         view_verify_operator_bounds<R + 1>(map, args...);
+}
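+
+// E.g. (a sketch): for a rank-3 map, view_verify_operator_bounds<0>(map, i, j, k)
+// unfolds to
+//   size_t(i) < extent(0) && size_t(j) < extent(1) && size_t(k) < extent(2)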
+
+template <unsigned, class MapType>
+inline void view_error_operator_bounds(char*, int, const MapType&) {}
+
+template <unsigned R, class MapType, class iType, class... Args>
+inline void view_error_operator_bounds(char* buf, int len, const MapType& map,
+                                       const iType& i, Args... args) {
+  const int n = snprintf(
+      buf, len, " %lu < %lu %c", static_cast<unsigned long>(i),
+      static_cast<unsigned long>(map.extent(R)), (sizeof...(Args) ? ',' : ')'));
+  view_error_operator_bounds<R + 1>(buf + n, len - n, map, args...);
+}
+
+/* Check #3: is the View managed as determined by the MemoryTraits? */
+template <class MapType, bool is_managed = (MapType::is_managed != 0)>
+struct OperatorBoundsErrorOnDevice;
+
+template <class MapType>
+struct OperatorBoundsErrorOnDevice<MapType, false> {
+  KOKKOS_INLINE_FUNCTION
+  static void run(MapType const&) { Kokkos::abort("View bounds error"); }
+};
+
+template <class MapType>
+struct OperatorBoundsErrorOnDevice<MapType, true> {
+  KOKKOS_INLINE_FUNCTION
+  static void run(MapType const& map) {
+    SharedAllocationHeader const* const header =
+        SharedAllocationHeader::get_header(
+            static_cast<void const*>(map.data()));
+    char const* const label = header->label();
+    enum { LEN = 128 };
+    char msg[LEN];
+    char const* const first_part = "View bounds error of view ";
+    char* p                      = msg;
+    char* const end              = msg + LEN - 1;
+    for (char const* p2 = first_part; (*p2 != '\0') && (p < end); ++p, ++p2) {
+      *p = *p2;
+    }
+    for (char const* p2 = label; (*p2 != '\0') && (p < end); ++p, ++p2) {
+      *p = *p2;
+    }
+    *p = '\0';
+    Kokkos::abort(msg);
+  }
+};
+
+/* Check #2: does the ViewMapping have the printable_label_typedef defined?
+   As noted above, only the non-specialized standard-layout ViewMapping
+   defines it by default.
+   The existence of this alias indicates the existence of
+   MapType::is_managed. */
+template <class T>
+using printable_label_typedef_t = typename T::printable_label_typedef;
+
+template <class Map>
+KOKKOS_FUNCTION
+    std::enable_if_t<!is_detected<printable_label_typedef_t, Map>::value>
+    operator_bounds_error_on_device(Map const&) {
+  Kokkos::abort("View bounds error");
+}
+
+template <class Map>
+KOKKOS_FUNCTION
+    std::enable_if_t<is_detected<printable_label_typedef_t, Map>::value>
+    operator_bounds_error_on_device(Map const& map) {
+  OperatorBoundsErrorOnDevice<Map>::run(map);
+}
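+
+// Illustrative sketch of the detection idiom (MapWithLabel is hypothetical):
+// a mapping that exposes the alias, e.g.
+//   struct MapWithLabel { using printable_label_typedef = void; /* ... */ };
+// makes is_detected<printable_label_typedef_t, MapWithLabel>::value true and
+// selects the overload that reports the allocation label; any mapping
+// without the alias falls back to the plain "View bounds error" abort.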
+
+template <class MemorySpace, class ViewType, class MapType, class... Args>
+KOKKOS_INLINE_FUNCTION void view_verify_operator_bounds(
+    Kokkos::Impl::ViewTracker<ViewType> const& tracker, const MapType& map,
+    Args... args) {
+  if (!view_verify_operator_bounds<0>(map, args...)) {
+    KOKKOS_IF_ON_HOST(
+        (enum {LEN = 1024}; char buffer[LEN];
+         const std::string label =
+             tracker.m_tracker.template get_label<MemorySpace>();
+         int n = snprintf(buffer, LEN, "View bounds error of view %s (",
+                          label.c_str());
+         view_error_operator_bounds<0>(buffer + n, LEN - n, map, args...);
+         Kokkos::Impl::throw_runtime_exception(std::string(buffer));))
+
+    KOKKOS_IF_ON_DEVICE((
+        /* Check #1: is there a SharedAllocationRecord?
+           (we won't use it, but if it's not there then there isn't
+            a corresponding SharedAllocationHeader containing a label).
+           This check should cover the case of Views that don't
+           have the Unmanaged trait but were initialized by pointer. */
+        if (tracker.m_tracker.has_record()) {
+          operator_bounds_error_on_device(map);
+        } else { Kokkos::abort("View bounds error"); }))
+  }
+}
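+
+// On the host path above, an out-of-bounds access with indices (2, 7) on a
+// view labeled "A" with extents (4, 5) produces, roughly, the message
+//   View bounds error of view A ( 2 < 4 , 7 < 5 )
+// carried by the thrown exception; on the device path, Kokkos::abort is
+// called instead (with the label when a tracking record exists).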
+
+// primary template: memory space is accessible, do nothing.
+template <class MemorySpace, class AccessSpace,
+          bool = SpaceAccessibility<AccessSpace, MemorySpace>::accessible>
+struct RuntimeCheckViewMemoryAccessViolation {
+  template <class Track, class Map>
+  KOKKOS_FUNCTION RuntimeCheckViewMemoryAccessViolation(char const* const,
+                                                        Track const&,
+                                                        Map const&) {}
+};
+
+// explicit specialization: memory access violation will occur, call abort with
+// the specified error message.
+template <class MemorySpace, class AccessSpace>
+struct RuntimeCheckViewMemoryAccessViolation<MemorySpace, AccessSpace, false> {
+  template <class Track, class Map>
+  KOKKOS_FUNCTION RuntimeCheckViewMemoryAccessViolation(char const* const msg,
+                                                        Track const& track,
+                                                        Map const&) {
+    char err[256] = "";
+    strncat(err, msg, 64);
+    strcat(err, " (label=\"");
+
+    KOKKOS_IF_ON_HOST(({
+      auto const tracker = track.m_tracker;
+
+      if (tracker.has_record()) {
+        strncat(err, tracker.template get_label<void>().c_str(), 128);
+      } else {
+        strcat(err, "**UNMANAGED**");
+      }
+    }))
+
+    KOKKOS_IF_ON_DEVICE(({
+      strcat(err, "**UNAVAILABLE**");
+      (void)track;
+    }))
+
+    strcat(err, "\")");
+
+    Kokkos::abort(err);
+  }
+};
+
+template <class MemorySpace, class Track, class Map, class... Ignore>
+KOKKOS_FUNCTION void runtime_check_memory_access_violation(
+    char const* const msg, Track const& track, Map const& map, Ignore...) {
+  KOKKOS_IF_ON_HOST(
+      ((void)RuntimeCheckViewMemoryAccessViolation<MemorySpace,
+                                                   DefaultHostExecutionSpace>(
+           msg, track, map);))
+  KOKKOS_IF_ON_DEVICE(
+      ((void)RuntimeCheckViewMemoryAccessViolation<MemorySpace,
+                                                   DefaultExecutionSpace>(
+           msg, track, map);))
+}
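+
+// Dispatch sketch: in host code this instantiates the checker with
+// DefaultHostExecutionSpace as AccessSpace. If MemorySpace is, say,
+// CudaSpace, SpaceAccessibility reports accessible == false, the `false`
+// specialization runs and aborts with the message plus the view label;
+// when the space is accessible, the primary template's empty constructor
+// compiles away entirely.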
+
+} /* namespace Impl */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif /* #ifndef KOKKOS_EXPERIMENTAL_VIEW_MAPPING_HPP */
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewTracker.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewTracker.hpp
new file mode 100644 (file)
index 0000000..cfa30f6
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_VIEW_TRACKER_HPP
+#define KOKKOS_VIEW_TRACKER_HPP
+
+namespace Kokkos {
+
+template <class DataType, class... Properties>
+class View;
+
+namespace Impl {
+
+/*
+ * \class ViewTracker
+ * \brief Template class wrapping the shared allocation tracker.
+ *
+ * This class is templated on the View and provides constructors that
+ * match the view. The constructors and assignments from a view
+ * externalize the logic needed to enable/disable reference counting,
+ * providing a single gate for further developments that may hinge on
+ * the same logic.
+ */
+template <class ParentView>
+struct ViewTracker {
+  using track_type  = Kokkos::Impl::SharedAllocationTracker;
+  using view_traits = typename ParentView::traits;
+
+  track_type m_tracker;
+
+  KOKKOS_INLINE_FUNCTION
+  ViewTracker() : m_tracker() {}
+
+  KOKKOS_INLINE_FUNCTION
+  ViewTracker(const ViewTracker& vt) noexcept
+      : m_tracker(vt.m_tracker, view_traits::is_managed) {}
+
+  KOKKOS_INLINE_FUNCTION
+  explicit ViewTracker(const ParentView& vt) noexcept : m_tracker() {
+    assign(vt);
+  }
+
+  template <class RT, class... RP>
+  KOKKOS_INLINE_FUNCTION explicit ViewTracker(
+      const View<RT, RP...>& vt) noexcept
+      : m_tracker() {
+    assign(vt);
+  }
+
+  template <class RT, class... RP>
+  KOKKOS_INLINE_FUNCTION void assign(const View<RT, RP...>& vt) noexcept {
+    if (this == reinterpret_cast<const ViewTracker*>(&vt.m_track)) return;
+    KOKKOS_IF_ON_HOST((
+        if (view_traits::is_managed && Kokkos::Impl::SharedAllocationRecord<
+                                           void, void>::tracking_enabled()) {
+          m_tracker.assign_direct(vt.m_track.m_tracker);
+        } else { m_tracker.assign_force_disable(vt.m_track.m_tracker); }))
+
+    KOKKOS_IF_ON_DEVICE((m_tracker.assign_force_disable(vt.m_track.m_tracker);))
+  }
+
+  KOKKOS_INLINE_FUNCTION ViewTracker& operator=(
+      const ViewTracker& rhs) noexcept {
+    if (this == &rhs) return *this;
+    KOKKOS_IF_ON_HOST((
+        if (view_traits::is_managed && Kokkos::Impl::SharedAllocationRecord<
+                                           void, void>::tracking_enabled()) {
+          m_tracker.assign_direct(rhs.m_tracker);
+        } else { m_tracker.assign_force_disable(rhs.m_tracker); }))
+
+    KOKKOS_IF_ON_DEVICE((m_tracker.assign_force_disable(rhs.m_tracker);))
+    return *this;
+  }
+
+  KOKKOS_INLINE_FUNCTION
+  explicit ViewTracker(const track_type& tt) noexcept
+      : m_tracker(tt, view_traits::is_managed) {}
+};
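+
+// Semantics sketch: copying a ViewTracker participates in reference
+// counting only when the parent view is managed. Copying the tracker of a
+// managed view in host code (with global tracking enabled) increments the
+// shared allocation's use count; unmanaged views, and any copy made in
+// device code, go through the force-disable paths and leave the count
+// untouched.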
+
+}  // namespace Impl
+
+}  // namespace Kokkos
+
+#endif  // KOKKOS_VIEW_TRACKER_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewUniformType.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_ViewUniformType.hpp
new file mode 100644 (file)
index 0000000..13ed4df
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_EXPERIMENTAL_VIEWUNIFORMTYPE_HPP
+#define KOKKOS_EXPERIMENTAL_VIEWUNIFORMTYPE_HPP
+
+#include <Kokkos_Macros.hpp>
+
+namespace Kokkos {
+namespace Impl {
+template <class ScalarType, int Rank>
+struct ViewScalarToDataType {
+  using type = typename ViewScalarToDataType<ScalarType, Rank - 1>::type *;
+};
+
+template <class ScalarType>
+struct ViewScalarToDataType<ScalarType, 0> {
+  using type = ScalarType;
+};
+
+template <class LayoutType, int Rank>
+struct ViewUniformLayout {
+  using array_layout = LayoutType;
+};
+
+template <class LayoutType>
+struct ViewUniformLayout<LayoutType, 0> {
+  using array_layout = Kokkos::LayoutLeft;
+};
+
+template <>
+struct ViewUniformLayout<Kokkos::LayoutRight, 1> {
+  using array_layout = Kokkos::LayoutLeft;
+};
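+
+// Note: the two specializations above fold degenerate cases onto
+// LayoutLeft. A rank-0 view has no extents, and a contiguous rank-1 view is
+// laid out identically in LayoutLeft and LayoutRight, so mapping both cases
+// to LayoutLeft lets views that differ only in these trivial layout choices
+// share a single uniform type.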
+
+template <class ViewType, int Traits>
+struct ViewUniformType {
+  using data_type       = typename ViewType::data_type;
+  using const_data_type = std::add_const_t<typename ViewType::data_type>;
+  using runtime_data_type =
+      typename ViewScalarToDataType<typename ViewType::value_type,
+                                    ViewType::rank>::type;
+  using runtime_const_data_type = typename ViewScalarToDataType<
+      std::add_const_t<typename ViewType::value_type>, ViewType::rank>::type;
+
+  using array_layout =
+      typename ViewUniformLayout<typename ViewType::array_layout,
+                                 ViewType::rank>::array_layout;
+
+  using device_type = typename ViewType::device_type;
+  using anonymous_device_type =
+      typename Kokkos::Device<typename device_type::execution_space,
+                              Kokkos::AnonymousSpace>;
+
+  using memory_traits = typename Kokkos::MemoryTraits<Traits>;
+  using type =
+      Kokkos::View<data_type, array_layout, device_type, memory_traits>;
+  using const_type =
+      Kokkos::View<const_data_type, array_layout, device_type, memory_traits>;
+  using runtime_type =
+      Kokkos::View<runtime_data_type, array_layout, device_type, memory_traits>;
+  using runtime_const_type = Kokkos::View<runtime_const_data_type, array_layout,
+                                          device_type, memory_traits>;
+
+  using nomemspace_type = Kokkos::View<data_type, array_layout,
+                                       anonymous_device_type, memory_traits>;
+  using const_nomemspace_type =
+      Kokkos::View<const_data_type, array_layout, anonymous_device_type,
+                   memory_traits>;
+  using runtime_nomemspace_type =
+      Kokkos::View<runtime_data_type, array_layout, anonymous_device_type,
+                   memory_traits>;
+  using runtime_const_nomemspace_type =
+      Kokkos::View<runtime_const_data_type, array_layout, anonymous_device_type,
+                   memory_traits>;
+};
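+
+// Usage sketch (hypothetical view type, host build assumed):
+//   using V = Kokkos::View<double[3][4], Kokkos::LayoutRight>;
+//   using U = Kokkos::Impl::ViewUniformType<V, 0>;
+//   static_assert(std::is_same<U::runtime_data_type, double**>::value, "");
+//   static_assert(std::is_same<U::const_data_type,
+//                              std::add_const_t<double[3][4]>>::value, "");
+// The runtime_* aliases replace compile-time extents with runtime ones while
+// preserving rank; the *_nomemspace_type aliases additionally swap the
+// memory space for Kokkos::AnonymousSpace.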
+}  // namespace Impl
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Volatile_Load.hpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_Volatile_Load.hpp
new file mode 100644 (file)
index 0000000..4af26dc
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#include <Kokkos_Macros.hpp>
+
+#if defined(KOKKOS_ATOMIC_HPP) && !defined(KOKKOS_VOLATILE_LOAD_HPP)
+#define KOKKOS_VOLATILE_LOAD_HPP
+
+#if defined(__GNUC__) /* GNU C   */ || defined(__GNUG__) /* GNU C++ */ || \
+    defined(__clang__)
+
+#define KOKKOS_IMPL_MAY_ALIAS __attribute__((__may_alias__))
+
+#else
+
+#define KOKKOS_IMPL_MAY_ALIAS
+
+#endif
+
+namespace Kokkos {
+
+//----------------------------------------------------------------------------
+
+template <typename T>
+KOKKOS_FORCEINLINE_FUNCTION T volatile_load(T const volatile* const src_ptr) {
+  typedef uint64_t KOKKOS_IMPL_MAY_ALIAS T64;  // NOLINT(modernize-use-using)
+  typedef uint32_t KOKKOS_IMPL_MAY_ALIAS T32;  // NOLINT(modernize-use-using)
+  typedef uint16_t KOKKOS_IMPL_MAY_ALIAS T16;  // NOLINT(modernize-use-using)
+  typedef uint8_t KOKKOS_IMPL_MAY_ALIAS T8;    // NOLINT(modernize-use-using)
+
+  enum {
+    NUM_8  = sizeof(T),
+    NUM_16 = NUM_8 / 2,
+    NUM_32 = NUM_8 / 4,
+    NUM_64 = NUM_8 / 8
+  };
+
+  union {
+    T const volatile* const ptr;
+    T64 const volatile* const ptr64;
+    T32 const volatile* const ptr32;
+    T16 const volatile* const ptr16;
+    T8 const volatile* const ptr8;
+  } src = {src_ptr};
+
+  T result;
+
+  union {
+    T* const ptr;
+    T64* const ptr64;
+    T32* const ptr32;
+    T16* const ptr16;
+    T8* const ptr8;
+  } dst = {&result};
+
+  for (int i = 0; i < NUM_64; ++i) {
+    dst.ptr64[i] = src.ptr64[i];
+  }
+
+  if (NUM_64 * 2 < NUM_32) {
+    dst.ptr32[NUM_64 * 2] = src.ptr32[NUM_64 * 2];
+  }
+
+  if (NUM_32 * 2 < NUM_16) {
+    dst.ptr16[NUM_32 * 2] = src.ptr16[NUM_32 * 2];
+  }
+
+  if (NUM_16 * 2 < NUM_8) {
+    dst.ptr8[NUM_16 * 2] = src.ptr8[NUM_16 * 2];
+  }
+
+  return result;
+}
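+
+// Worked example: for sizeof(T) == 12 the constants are NUM_8 = 12,
+// NUM_16 = 6, NUM_32 = 3, NUM_64 = 1, so the loop copies one 64-bit word
+// (bytes 0-7), the first tail branch copies the 32-bit word at index
+// NUM_64 * 2 == 2 (bytes 8-11), and the 16- and 8-bit branches are skipped
+// because no smaller remainder is left.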
+
+template <typename T>
+KOKKOS_FORCEINLINE_FUNCTION void volatile_store(
+    T volatile* const dst_ptr, T const volatile* const src_ptr) {
+  typedef uint64_t KOKKOS_IMPL_MAY_ALIAS T64;  // NOLINT(modernize-use-using)
+  typedef uint32_t KOKKOS_IMPL_MAY_ALIAS T32;  // NOLINT(modernize-use-using)
+  typedef uint16_t KOKKOS_IMPL_MAY_ALIAS T16;  // NOLINT(modernize-use-using)
+  typedef uint8_t KOKKOS_IMPL_MAY_ALIAS T8;    // NOLINT(modernize-use-using)
+
+  enum {
+    NUM_8  = sizeof(T),
+    NUM_16 = NUM_8 / 2,
+    NUM_32 = NUM_8 / 4,
+    NUM_64 = NUM_8 / 8
+  };
+
+  union {
+    T const volatile* const ptr;
+    T64 const volatile* const ptr64;
+    T32 const volatile* const ptr32;
+    T16 const volatile* const ptr16;
+    T8 const volatile* const ptr8;
+  } src = {src_ptr};
+
+  union {
+    T volatile* const ptr;
+    T64 volatile* const ptr64;
+    T32 volatile* const ptr32;
+    T16 volatile* const ptr16;
+    T8 volatile* const ptr8;
+  } dst = {dst_ptr};
+
+  for (int i = 0; i < NUM_64; ++i) {
+    dst.ptr64[i] = src.ptr64[i];
+  }
+
+  if (NUM_64 * 2 < NUM_32) {
+    dst.ptr32[NUM_64 * 2] = src.ptr32[NUM_64 * 2];
+  }
+
+  if (NUM_32 * 2 < NUM_16) {
+    dst.ptr16[NUM_32 * 2] = src.ptr16[NUM_32 * 2];
+  }
+
+  if (NUM_16 * 2 < NUM_8) {
+    dst.ptr8[NUM_16 * 2] = src.ptr8[NUM_16 * 2];
+  }
+}
+
+template <typename T>
+KOKKOS_FORCEINLINE_FUNCTION void volatile_store(T volatile* const dst_ptr,
+                                                T const* const src_ptr) {
+  typedef uint64_t KOKKOS_IMPL_MAY_ALIAS T64;  // NOLINT(modernize-use-using)
+  typedef uint32_t KOKKOS_IMPL_MAY_ALIAS T32;  // NOLINT(modernize-use-using)
+  typedef uint16_t KOKKOS_IMPL_MAY_ALIAS T16;  // NOLINT(modernize-use-using)
+  typedef uint8_t KOKKOS_IMPL_MAY_ALIAS T8;    // NOLINT(modernize-use-using)
+
+  enum {
+    NUM_8  = sizeof(T),
+    NUM_16 = NUM_8 / 2,
+    NUM_32 = NUM_8 / 4,
+    NUM_64 = NUM_8 / 8
+  };
+
+  union {
+    T const* const ptr;
+    T64 const* const ptr64;
+    T32 const* const ptr32;
+    T16 const* const ptr16;
+    T8 const* const ptr8;
+  } src = {src_ptr};
+
+  union {
+    T volatile* const ptr;
+    T64 volatile* const ptr64;
+    T32 volatile* const ptr32;
+    T16 volatile* const ptr16;
+    T8 volatile* const ptr8;
+  } dst = {dst_ptr};
+
+  for (int i = 0; i < NUM_64; ++i) {
+    dst.ptr64[i] = src.ptr64[i];
+  }
+
+  if (NUM_64 * 2 < NUM_32) {
+    dst.ptr32[NUM_64 * 2] = src.ptr32[NUM_64 * 2];
+  }
+
+  if (NUM_32 * 2 < NUM_16) {
+    dst.ptr16[NUM_32 * 2] = src.ptr16[NUM_32 * 2];
+  }
+
+  if (NUM_16 * 2 < NUM_8) {
+    dst.ptr8[NUM_16 * 2] = src.ptr8[NUM_16 * 2];
+  }
+}
+
+template <typename T>
+KOKKOS_FORCEINLINE_FUNCTION void volatile_store(T volatile* dst_ptr,
+                                                T const volatile& src) {
+  volatile_store(dst_ptr, &src);
+}
+
+template <typename T>
+KOKKOS_FORCEINLINE_FUNCTION void volatile_store(T volatile* dst_ptr,
+                                                T const& src) {
+  volatile_store(dst_ptr, &src);
+}
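+
+// Usage sketch (illustrative): polling a flag written by another thread,
+// keeping each load out of compiler registers:
+//   int flag = 0;
+//   volatile int* const f = &flag;
+//   while (0 == Kokkos::volatile_load(f)) { /* spin */ }
+//   Kokkos::volatile_store(f, 0);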
+
+template <typename T>
+KOKKOS_FORCEINLINE_FUNCTION T safe_load(T const* const ptr) {
+#if !defined(__MIC__)
+  return *ptr;
+#else
+  return volatile_load(ptr);
+#endif
+}
+
+}  // namespace Kokkos
+
+#undef KOKKOS_IMPL_MAY_ALIAS
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/impl/Kokkos_hwloc.cpp b/bundled/kokkos-3.7.00/core/src/impl/Kokkos_hwloc.cpp
new file mode 100644 (file)
index 0000000..a0d1bc4
--- /dev/null
@@ -0,0 +1,750 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_IMPL_PUBLIC_INCLUDE
+#define KOKKOS_IMPL_PUBLIC_INCLUDE
+#endif
+
+#define DEBUG_PRINT 0
+
+#include <iostream>
+#include <sstream>
+#include <algorithm>
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Core.hpp>
+#include <Kokkos_hwloc.hpp>
+#include <impl/Kokkos_Error.hpp>
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace hwloc {
+
+/* Return 0 if asynchronous; 1 if synchronous, in which case the calling
+   process' core is included as threads_coord[0]. */
+unsigned thread_mapping(const char* const label, const bool allow_async,
+                        unsigned& thread_count, unsigned& use_numa_count,
+                        unsigned& use_cores_per_numa,
+                        std::pair<unsigned, unsigned> threads_coord[]) {
+  const bool hwloc_avail = Kokkos::hwloc::available();
+  const unsigned avail_numa_count =
+      hwloc_avail ? hwloc::get_available_numa_count() : 1;
+  const unsigned avail_cores_per_numa =
+      hwloc_avail ? hwloc::get_available_cores_per_numa() : thread_count;
+  const unsigned avail_threads_per_core =
+      hwloc_avail ? hwloc::get_available_threads_per_core() : 1;
+
+  // (numa,core) coordinate of the process:
+  const std::pair<unsigned, unsigned> proc_coord =
+      Kokkos::hwloc::get_this_thread_coordinate();
+
+  //------------------------------------------------------------------------
+  // Defaults for unspecified inputs:
+
+  if (!use_numa_count) {
+    // Default to use all NUMA regions
+    use_numa_count = !thread_count
+                         ? avail_numa_count
+                         : (thread_count < avail_numa_count ? thread_count
+                                                            : avail_numa_count);
+  }
+
+  if (!use_cores_per_numa) {
+    // Default to use all but one core if asynchronous, all cores if
+    // synchronous.
+    const unsigned threads_per_numa = thread_count / use_numa_count;
+
+    use_cores_per_numa =
+        !threads_per_numa
+            ? avail_cores_per_numa - (allow_async ? 1 : 0)
+            : (threads_per_numa < avail_cores_per_numa ? threads_per_numa
+                                                       : avail_cores_per_numa);
+  }
+
+  if (!thread_count) {
+    thread_count = use_numa_count * use_cores_per_numa * avail_threads_per_core;
+  }
+
+  //------------------------------------------------------------------------
+  // Input verification:
+
+  const bool valid_numa = use_numa_count <= avail_numa_count;
+  const bool valid_cores =
+      use_cores_per_numa && use_cores_per_numa <= avail_cores_per_numa;
+  const bool valid_threads =
+      thread_count && thread_count <= use_numa_count * use_cores_per_numa *
+                                          avail_threads_per_core;
+  const bool balanced_numa = !(thread_count % use_numa_count);
+  const bool balanced_cores =
+      !(thread_count % (use_numa_count * use_cores_per_numa));
+
+  const bool valid_input = valid_numa && valid_cores && valid_threads &&
+                           balanced_numa && balanced_cores;
+
+  if (!valid_input) {
+    std::ostringstream msg;
+
+    msg << label << " HWLOC ERROR(s)";
+
+    if (!valid_threads) {
+      msg << " : thread_count(" << thread_count << ") exceeds capacity("
+          << use_numa_count * use_cores_per_numa * avail_threads_per_core
+          << ")";
+    }
+    if (!valid_numa) {
+      msg << " : use_numa_count(" << use_numa_count << ") exceeds capacity("
+          << avail_numa_count << ")";
+    }
+    if (!valid_cores) {
+      msg << " : use_cores_per_numa(" << use_cores_per_numa
+          << ") exceeds capacity(" << avail_cores_per_numa << ")";
+    }
+    if (!balanced_numa) {
+      msg << " : thread_count(" << thread_count << ") imbalanced among numa("
+          << use_numa_count << ")";
+    }
+    if (!balanced_cores) {
+      msg << " : thread_count(" << thread_count << ") imbalanced among cores("
+          << use_numa_count * use_cores_per_numa << ")";
+    }
+
+    Kokkos::Impl::throw_runtime_exception(msg.str());
+  }
+
+  const unsigned thread_spawn_synchronous =
+      (allow_async && 1 < thread_count &&
+       (use_numa_count < avail_numa_count ||
+        use_cores_per_numa < avail_cores_per_numa))
+          ? 0 /* asynchronous */
+          : 1 /* synchronous, threads_coord[0] is process core */;
+
+  // Determine binding coordinates for to-be-spawned threads so that
+  // threads may be bound to cores as they are spawned.
+
+  const unsigned threads_per_core =
+      thread_count / (use_numa_count * use_cores_per_numa);
+
+  if (thread_spawn_synchronous) {
+    // Working synchronously; include the process core as threads_coord[0].
+    // Swap the NUMA coordinate of the process core with 0.
+    // Swap the CORE coordinate of the process core with 0.
+    for (unsigned i = 0, inuma = avail_numa_count - use_numa_count;
+         inuma < avail_numa_count; ++inuma) {
+      const unsigned numa_coord = 0 == inuma
+                                      ? proc_coord.first
+                                      : (proc_coord.first == inuma ? 0 : inuma);
+      for (unsigned icore = avail_cores_per_numa - use_cores_per_numa;
+           icore < avail_cores_per_numa; ++icore) {
+        const unsigned core_coord =
+            0 == icore ? proc_coord.second
+                       : (proc_coord.second == icore ? 0 : icore);
+        for (unsigned ith = 0; ith < threads_per_core; ++ith, ++i) {
+          threads_coord[i].first  = numa_coord;
+          threads_coord[i].second = core_coord;
+        }
+      }
+    }
+  } else if (use_numa_count < avail_numa_count) {
+    // Working asynchronously; omit the process' NUMA region from the pool.
+    // Swap the NUMA coordinate of the process core with ( ( avail_numa_count -
+    // use_numa_count ) - 1 )
+    const unsigned numa_coord_swap = (avail_numa_count - use_numa_count) - 1;
+    for (unsigned i = 0, inuma = avail_numa_count - use_numa_count;
+         inuma < avail_numa_count; ++inuma) {
+      const unsigned numa_coord =
+          proc_coord.first == inuma ? numa_coord_swap : inuma;
+      for (unsigned icore = avail_cores_per_numa - use_cores_per_numa;
+           icore < avail_cores_per_numa; ++icore) {
+        const unsigned core_coord = icore;
+        for (unsigned ith = 0; ith < threads_per_core; ++ith, ++i) {
+          threads_coord[i].first  = numa_coord;
+          threads_coord[i].second = core_coord;
+        }
+      }
+    }
+  } else if (use_cores_per_numa < avail_cores_per_numa) {
+    // Working asynchronously; omit the process' core from the pool.
+    // Swap the CORE coordinate of the process core with ( (
+    // avail_cores_per_numa - use_cores_per_numa ) - 1 )
+    const unsigned core_coord_swap =
+        (avail_cores_per_numa - use_cores_per_numa) - 1;
+    for (unsigned i = 0, inuma = avail_numa_count - use_numa_count;
+         inuma < avail_numa_count; ++inuma) {
+      const unsigned numa_coord = inuma;
+      for (unsigned icore = avail_cores_per_numa - use_cores_per_numa;
+           icore < avail_cores_per_numa; ++icore) {
+        const unsigned core_coord =
+            proc_coord.second == icore ? core_coord_swap : icore;
+        for (unsigned ith = 0; ith < threads_per_core; ++ith, ++i) {
+          threads_coord[i].first  = numa_coord;
+          threads_coord[i].second = core_coord;
+        }
+      }
+    }
+  }
+
+  return thread_spawn_synchronous;
+}
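+
+// Worked example (hypothetical machine): if hwloc reports 2 NUMA regions x
+// 8 cores x 2 hardware threads and all three in/out parameters are 0 with
+// allow_async == true, the defaults above resolve to
+//   use_numa_count     = 2
+//   use_cores_per_numa = 8 - 1 = 7   (one core left for the process)
+//   thread_count       = 2 * 7 * 2 = 28
+// and the function returns 0 (asynchronous), since a core remains free.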
+
+} /* namespace hwloc */
+} /* namespace Kokkos */
+
+/*--------------------------------------------------------------------------*/
+/*--------------------------------------------------------------------------*/
+
+#if defined(KOKKOS_ENABLE_HWLOC)
+
+#include <iostream>
+#include <sstream>
+
+/*--------------------------------------------------------------------------*/
+/* Third Party Libraries */
+
+/* Hardware locality library: http://www.open-mpi.org/projects/hwloc/ */
+#include <hwloc.h>
+
+#define REQUIRED_HWLOC_API_VERSION 0x000010300
+
+#if HWLOC_API_VERSION < REQUIRED_HWLOC_API_VERSION
+#error \
+    "Requires  http://www.open-mpi.org/projects/hwloc/  Version 1.3 or greater"
+#endif
+
+/*--------------------------------------------------------------------------*/
+
+namespace Kokkos {
+namespace hwloc {
+namespace {
+
+#if DEBUG_PRINT
+
+inline void print_bitmap(std::ostream& s, const hwloc_const_bitmap_t bitmap) {
+  s << "{";
+  for (int i = hwloc_bitmap_first(bitmap); -1 != i;
+       i = hwloc_bitmap_next(bitmap, i)) {
+    s << " " << i;
+  }
+  s << " }";
+}
+
+#endif
+
+enum { MAX_CORE = 1024 };
+
+std::pair<unsigned, unsigned> s_core_topology(0, 0);
+unsigned s_core_capacity(0);
+hwloc_topology_t s_hwloc_topology(nullptr);
+hwloc_bitmap_t s_hwloc_location(nullptr);
+hwloc_bitmap_t s_process_binding(nullptr);
+hwloc_bitmap_t s_core[MAX_CORE];
+bool s_can_bind_threads(true);
+
+struct Sentinel {
+  ~Sentinel();
+  Sentinel();
+};
+
+bool sentinel() {
+  static Sentinel self;
+
+  if (nullptr == s_hwloc_topology) {
+    std::cerr << "Kokkos::hwloc ERROR : Called after return from main()"
+              << std::endl;
+    std::cerr.flush();
+  }
+
+  return nullptr != s_hwloc_topology;
+}
+
+Sentinel::~Sentinel() {
+  hwloc_topology_destroy(s_hwloc_topology);
+  hwloc_bitmap_free(s_process_binding);
+  hwloc_bitmap_free(s_hwloc_location);
+
+  s_core_topology.first  = 0;
+  s_core_topology.second = 0;
+  s_core_capacity        = 0;
+  s_hwloc_topology       = nullptr;
+  s_hwloc_location       = nullptr;
+  s_process_binding      = nullptr;
+}
+
+Sentinel::Sentinel() {
+#if defined(__MIC__)
+  static const bool remove_core_0 = true;
+#else
+  static const bool remove_core_0 = false;
+#endif
+
+  s_core_topology   = std::pair<unsigned, unsigned>(0, 0);
+  s_core_capacity   = 0;
+  s_hwloc_topology  = nullptr;
+  s_hwloc_location  = nullptr;
+  s_process_binding = nullptr;
+
+  for (unsigned i = 0; i < MAX_CORE; ++i) s_core[i] = nullptr;
+
+  hwloc_topology_init(&s_hwloc_topology);
+  hwloc_topology_load(s_hwloc_topology);
+
+  s_hwloc_location  = hwloc_bitmap_alloc();
+  s_process_binding = hwloc_bitmap_alloc();
+
+  hwloc_get_cpubind(s_hwloc_topology, s_process_binding, HWLOC_CPUBIND_PROCESS);
+
+  if (hwloc_bitmap_iszero(s_process_binding)) {
+    if (Kokkos::show_warnings()) {
+      std::cerr << "WARNING: Cannot detect process binding -- ASSUMING ALL "
+                   "processing units"
+                << std::endl;
+    }
+    const int pu_depth = hwloc_get_type_depth(s_hwloc_topology, HWLOC_OBJ_PU);
+    int num_pu         = 1;
+    if (pu_depth != HWLOC_TYPE_DEPTH_UNKNOWN) {
+      num_pu = hwloc_get_nbobjs_by_depth(s_hwloc_topology, pu_depth);
+    } else {
+      if (Kokkos::show_warnings()) {
+        std::cerr << "WARNING: Cannot detect number of processing units -- "
+                     "ASSUMING 1 (serial)."
+                  << std::endl;
+      }
+      num_pu = 1;
+    }
+    hwloc_bitmap_set_range(s_process_binding, 0, num_pu - 1);
+    s_can_bind_threads = false;
+  }
+
+  if (remove_core_0) {
+    const hwloc_obj_t core =
+        hwloc_get_obj_by_type(s_hwloc_topology, HWLOC_OBJ_CORE, 0);
+
+    if (hwloc_bitmap_intersects(s_process_binding, core->cpuset)) {
+      hwloc_bitmap_t s_process_no_core_zero = hwloc_bitmap_alloc();
+
+      hwloc_bitmap_andnot(s_process_no_core_zero, s_process_binding,
+                          core->cpuset);
+
+      bool ok =
+          0 == hwloc_set_cpubind(s_hwloc_topology, s_process_no_core_zero,
+                                 HWLOC_CPUBIND_PROCESS | HWLOC_CPUBIND_STRICT);
+
+      if (ok) {
+        hwloc_get_cpubind(s_hwloc_topology, s_process_binding,
+                          HWLOC_CPUBIND_PROCESS);
+
+        ok = 0 !=
+             hwloc_bitmap_isequal(s_process_binding, s_process_no_core_zero);
+      }
+
+      hwloc_bitmap_free(s_process_no_core_zero);
+
+      if (Kokkos::show_warnings() && !ok) {
+        std::cerr << "WARNING: Kokkos::hwloc attempted and failed to move "
+                     "process off of core #0"
+                  << std::endl;
+      }
+    }
+  }
+
+  // Choose a hwloc object type for the NUMA level, which may not exist.
+
+  hwloc_obj_type_t root_type = HWLOC_OBJ_TYPE_MAX;
+
+  {
+    // Object types to search, in order.
+    static const hwloc_obj_type_t candidate_root_type[] = {
+        HWLOC_OBJ_NODE,   /* NUMA region     */
+        HWLOC_OBJ_SOCKET, /* hardware socket */
+        HWLOC_OBJ_MACHINE /* local machine   */
+    };
+
+    enum {
+      CANDIDATE_ROOT_TYPE_COUNT =
+          sizeof(candidate_root_type) / sizeof(hwloc_obj_type_t)
+    };
+
+    for (int k = 0;
+         k < CANDIDATE_ROOT_TYPE_COUNT && HWLOC_OBJ_TYPE_MAX == root_type;
+         ++k) {
+      if (0 <
+          hwloc_get_nbobjs_by_type(s_hwloc_topology, candidate_root_type[k])) {
+        root_type = candidate_root_type[k];
+      }
+    }
+  }
+
+  // Determine which of these 'root' types are available to this process.
+  // The process may have been bound (e.g., by MPI) to a subset of these root
+  // types. Determine the current location of the master (calling) process.
+
+  hwloc_bitmap_t proc_cpuset_location = hwloc_bitmap_alloc();
+
+  hwloc_get_last_cpu_location(s_hwloc_topology, proc_cpuset_location,
+                              HWLOC_CPUBIND_THREAD);
+
+  const unsigned max_root =
+      hwloc_get_nbobjs_by_type(s_hwloc_topology, root_type);
+
+  unsigned root_base     = max_root;
+  unsigned root_count    = 0;
+  unsigned core_per_root = 0;
+  unsigned pu_per_core   = 0;
+  bool symmetric         = true;
+
+  for (unsigned i = 0; i < max_root; ++i) {
+    const hwloc_obj_t root =
+        hwloc_get_obj_by_type(s_hwloc_topology, root_type, i);
+
+    if (hwloc_bitmap_intersects(s_process_binding, root->cpuset)) {
+      ++root_count;
+
+      // Remember which root (NUMA) object the master thread is running on.
+      // This will be logical NUMA rank #0 for this process.
+
+      if (hwloc_bitmap_intersects(proc_cpuset_location, root->cpuset)) {
+        root_base = i;
+      }
+
+      // Count available cores:
+
+      const unsigned max_core = hwloc_get_nbobjs_inside_cpuset_by_type(
+          s_hwloc_topology, root->cpuset, HWLOC_OBJ_CORE);
+
+      unsigned core_count = 0;
+
+      for (unsigned j = 0; j < max_core; ++j) {
+        const hwloc_obj_t core = hwloc_get_obj_inside_cpuset_by_type(
+            s_hwloc_topology, root->cpuset, HWLOC_OBJ_CORE, j);
+
+        // If the process's cpuset intersects the core's cpuset then the
+        // process can access this core. Must use intersection instead of
+        // inclusion because the Intel-Phi MPI may bind the process to only
+        // one of the core's hyperthreads.
+        //
+        // Assumption: if the process can access any hyperthread of the core
+        // then it has ownership of the entire core.
+        // This assumes that it would be performance-detrimental
+        // to spawn more than one MPI process per core and use nested threading.
+
+        if (hwloc_bitmap_intersects(s_process_binding, core->cpuset)) {
+          ++core_count;
+
+          const unsigned pu_count = hwloc_get_nbobjs_inside_cpuset_by_type(
+              s_hwloc_topology, core->cpuset, HWLOC_OBJ_PU);
+
+          if (pu_per_core == 0) pu_per_core = pu_count;
+
+          // Enforce symmetry by taking the minimum:
+
+          pu_per_core = std::min(pu_per_core, pu_count);
+
+          if (pu_count != pu_per_core) symmetric = false;
+        }
+      }
+
+      if (0 == core_per_root) core_per_root = core_count;
+
+      // Enforce symmetry by taking the minimum:
+
+      core_per_root = std::min(core_per_root, core_count);
+
+      if (core_count != core_per_root) symmetric = false;
+    }
+  }
+
+  s_core_topology.first  = root_count;
+  s_core_topology.second = core_per_root;
+  s_core_capacity        = pu_per_core;
+
+  // Fill the 's_core' array for fast mapping from a core coordinate to the
+  // hwloc cpuset object required for thread location querying and binding.
+
+  for (unsigned i = 0; i < max_root; ++i) {
+    const unsigned root_rank = (i + root_base) % max_root;
+
+    const hwloc_obj_t root =
+        hwloc_get_obj_by_type(s_hwloc_topology, root_type, root_rank);
+
+    if (hwloc_bitmap_intersects(s_process_binding, root->cpuset)) {
+      const unsigned max_core = hwloc_get_nbobjs_inside_cpuset_by_type(
+          s_hwloc_topology, root->cpuset, HWLOC_OBJ_CORE);
+
+      unsigned core_count = 0;
+
+      for (unsigned j = 0; j < max_core && core_count < core_per_root; ++j) {
+        const hwloc_obj_t core = hwloc_get_obj_inside_cpuset_by_type(
+            s_hwloc_topology, root->cpuset, HWLOC_OBJ_CORE, j);
+
+        if (hwloc_bitmap_intersects(s_process_binding, core->cpuset)) {
+          s_core[core_count + core_per_root * i] = core->cpuset;
+
+          ++core_count;
+        }
+      }
+    }
+  }
+
+  hwloc_bitmap_free(proc_cpuset_location);
+
+  if (Kokkos::show_warnings() && !symmetric) {
+    std::cerr << "Kokkos::hwloc WARNING: Using a symmetric subset of a "
+                 "non-symmetric core topology."
+              << std::endl;
+  }
+}
+
+}  // namespace
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+bool available() { return true; }
+
+unsigned get_available_numa_count() {
+  sentinel();
+  return s_core_topology.first;
+}
+
+unsigned get_available_cores_per_numa() {
+  sentinel();
+  return s_core_topology.second;
+}
+
+unsigned get_available_threads_per_core() {
+  sentinel();
+  return s_core_capacity;
+}
+
+bool can_bind_threads() {
+  sentinel();
+  return s_can_bind_threads;
+}
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+unsigned bind_this_thread(const unsigned coordinate_count,
+                          std::pair<unsigned, unsigned> coordinate[]) {
+  unsigned i = 0;
+
+  try {
+    const std::pair<unsigned, unsigned> current = get_this_thread_coordinate();
+
+    // Match one of the requests:
+    for (i = 0; i < coordinate_count && current != coordinate[i]; ++i)
+      ;
+
+    if (coordinate_count == i) {
+      // Match the first request (typically NUMA):
+      for (i = 0; i < coordinate_count && current.first != coordinate[i].first;
+           ++i)
+        ;
+    }
+
+    if (coordinate_count == i) {
+      // Match any unclaimed request:
+      for (i = 0; i < coordinate_count && ~0u == coordinate[i].first; ++i)
+        ;
+    }
+
+    if (coordinate_count == i || !bind_this_thread(coordinate[i])) {
+      // Failed to bind:
+      i = ~0u;
+    }
+
+    if (i < coordinate_count) {
+#if DEBUG_PRINT
+      if (current != coordinate[i]) {
+        std::cout << "  bind_this_thread: rebinding from (" << current.first
+                  << "," << current.second << ") to (" << coordinate[i].first
+                  << "," << coordinate[i].second << ")" << std::endl;
+      }
+#endif
+
+      coordinate[i].first  = ~0u;
+      coordinate[i].second = ~0u;
+    }
+  } catch (...) {
+    i = ~0u;
+  }
+
+  return i;
+}
+
+bool bind_this_thread(const std::pair<unsigned, unsigned> coord) {
+  if (!sentinel()) return false;
+
+#if DEBUG_PRINT
+
+  std::cout << "Kokkos::bind_this_thread() at ";
+
+  hwloc_get_last_cpu_location(s_hwloc_topology, s_hwloc_location,
+                              HWLOC_CPUBIND_THREAD);
+
+  print_bitmap(std::cout, s_hwloc_location);
+
+  std::cout << " to ";
+
+  print_bitmap(std::cout,
+               s_core[coord.second + coord.first * s_core_topology.second]);
+
+  std::cout << std::endl;
+
+#endif
+
+  // As safe and fast as possible.
+  // Fast-lookup by caching the coordinate -> hwloc cpuset mapping in 's_core'.
+  return coord.first < s_core_topology.first &&
+         coord.second < s_core_topology.second &&
+         0 == hwloc_set_cpubind(
+                  s_hwloc_topology,
+                  s_core[coord.second + coord.first * s_core_topology.second],
+                  HWLOC_CPUBIND_THREAD | HWLOC_CPUBIND_STRICT);
+}
+
+bool unbind_this_thread() {
+  if (!sentinel()) return false;
+
+#define HWLOC_DEBUG_PRINT 0
+
+#if HWLOC_DEBUG_PRINT
+
+  std::cout << "Kokkos::unbind_this_thread() from ";
+
+  hwloc_get_cpubind(s_hwloc_topology, s_hwloc_location, HWLOC_CPUBIND_THREAD);
+
+  print_bitmap(std::cout, s_hwloc_location);
+
+#endif
+
+  const bool result =
+      s_hwloc_topology &&
+      0 == hwloc_set_cpubind(s_hwloc_topology, s_process_binding,
+                             HWLOC_CPUBIND_THREAD | HWLOC_CPUBIND_STRICT);
+
+#if HWLOC_DEBUG_PRINT
+
+  std::cout << " to ";
+
+  hwloc_get_cpubind(s_hwloc_topology, s_hwloc_location, HWLOC_CPUBIND_THREAD);
+
+  print_bitmap(std::cout, s_hwloc_location);
+
+  std::cout << std::endl;
+
+#endif
+
+  return result;
+
+#undef HWLOC_DEBUG_PRINT
+}
+
+//----------------------------------------------------------------------------
+
+std::pair<unsigned, unsigned> get_this_thread_coordinate() {
+  std::pair<unsigned, unsigned> coord(0u, 0u);
+
+  if (!sentinel()) return coord;
+
+  const unsigned n = s_core_topology.first * s_core_topology.second;
+
+  // Using the pre-allocated 's_hwloc_location' to avoid memory
+  // allocation by this thread.  This call is NOT thread-safe.
+  hwloc_get_last_cpu_location(s_hwloc_topology, s_hwloc_location,
+                              HWLOC_CPUBIND_THREAD);
+
+  unsigned i = 0;
+
+  while (i < n && !hwloc_bitmap_intersects(s_hwloc_location, s_core[i])) ++i;
+
+  if (i < n) {
+    coord.first  = i / s_core_topology.second;
+    coord.second = i % s_core_topology.second;
+  }
+
+  return coord;
+}
+
+//----------------------------------------------------------------------------
+
+} /* namespace hwloc */
+} /* namespace Kokkos */
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#else /* ! defined( KOKKOS_ENABLE_HWLOC ) */
+
+namespace Kokkos {
+namespace hwloc {
+
+bool available() { return false; }
+bool can_bind_threads() { return false; }
+
+unsigned get_available_numa_count() { return 1; }
+unsigned get_available_cores_per_numa() { return 1; }
+unsigned get_available_threads_per_core() { return 1; }
+
+unsigned bind_this_thread(const unsigned, std::pair<unsigned, unsigned>[]) {
+  return ~0;
+}
+
+bool bind_this_thread(const std::pair<unsigned, unsigned>) { return false; }
+
+bool unbind_this_thread() { return true; }
+
+std::pair<unsigned, unsigned> get_this_thread_coordinate() {
+  return std::pair<unsigned, unsigned>(0, 0);
+}
+
+}  // namespace hwloc
+}  // namespace Kokkos
+
+//----------------------------------------------------------------------------
+//----------------------------------------------------------------------------
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/setup/Kokkos_Setup_Cuda.hpp b/bundled/kokkos-3.7.00/core/src/setup/Kokkos_Setup_Cuda.hpp
new file mode 100644 (file)
index 0000000..983a71a
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_CUDA_SETUP_HPP_
+#define KOKKOS_CUDA_SETUP_HPP_
+
+#if !defined(KOKKOS_ENABLE_CUDA)
+#error \
+    "KOKKOS_ENABLE_CUDA was not defined, but Kokkos_Setup_Cuda.hpp was included anyway."
+#endif
+
+#if defined(KOKKOS_ENABLE_CUDA) && !defined(__CUDACC__)
+#error \
+    "KOKKOS_ENABLE_CUDA defined but the compiler is not defining the __CUDACC__ macro as expected"
+// Some tooling environments that keep parsing past the #error above still
+// function better if __CUDACC__ is defined here.
+#define __CUDACC__
+#endif /* defined(KOKKOS_ENABLE_CUDA) && !defined(__CUDACC__) */
+
+// Compiling with a CUDA compiler.
+//
+//  Include <cuda.h> to pick up the CUDA_VERSION macro defined as:
+//    CUDA_VERSION = ( MAJOR_VERSION * 1000 ) + ( MINOR_VERSION * 10 )
+//
+//  When generating device code the __CUDA_ARCH__ macro is defined as:
+//    __CUDA_ARCH__ = ( MAJOR_CAPABILITY * 100 ) + ( MINOR_CAPABILITY * 10 )
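+//
+//  Worked example: CUDA 11.4 yields
+//    CUDA_VERSION  = 11 * 1000 + 4 * 10 = 11040
+//  and device code compiled for compute capability 7.0 (sm_70) sees
+//    __CUDA_ARCH__ = 7 * 100 + 0 * 10 = 700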
+
+#include <cuda_runtime.h>
+#include <cuda.h>
+
+#if defined(_WIN32)
+#define KOKKOS_IMPL_WINDOWS_CUDA
+#endif
+
+#if !defined(CUDA_VERSION)
+#error "#include <cuda.h> did not define CUDA_VERSION."
+#endif
+
+#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 300)
+// Compiling with CUDA compiler for device code.
+#error "Cuda device capability >= 3.0 is required."
+#endif
+
+#ifdef KOKKOS_ENABLE_CUDA_LAMBDA
+#define KOKKOS_LAMBDA [=] __host__ __device__
+
+#if defined(KOKKOS_ENABLE_CXX17) || defined(KOKKOS_ENABLE_CXX20)
+#define KOKKOS_CLASS_LAMBDA [ =, *this ] __host__ __device__
+#endif
+
+#else  // !defined(KOKKOS_ENABLE_CUDA_LAMBDA)
+#undef KOKKOS_ENABLE_CXX11_DISPATCH_LAMBDA
+#endif  // !defined(KOKKOS_ENABLE_CUDA_LAMBDA)
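+
+// Usage sketch (illustrative): with lambda support enabled, a kernel body
+// can be written as
+//   Kokkos::parallel_for(n, KOKKOS_LAMBDA(const int i) { y[i] = a * x[i]; });
+// which expands to [=] __host__ __device__ (const int i) { ... }, compiling
+// the same lambda for both host and device.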
+
+#if (10000 > CUDA_VERSION)
+#define KOKKOS_ENABLE_PRE_CUDA_10_DEPRECATION_API
+#endif
+
+#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)
+// PTX atomics with memory order semantics are only available on volta and later
+#if !defined(KOKKOS_DISABLE_CUDA_ASM)
+#if !defined(KOKKOS_ENABLE_CUDA_ASM)
+#define KOKKOS_ENABLE_CUDA_ASM
+#if !defined(KOKKOS_DISABLE_CUDA_ASM_ATOMICS) && \
+    defined(KOKKOS_ENABLE_GNU_ATOMICS)
+#define KOKKOS_ENABLE_CUDA_ASM_ATOMICS
+#endif
+#endif
+#endif
+#endif
+
+#define KOKKOS_IMPL_FORCEINLINE_FUNCTION __device__ __host__ __forceinline__
+#define KOKKOS_IMPL_FORCEINLINE __forceinline__
+#define KOKKOS_IMPL_INLINE_FUNCTION __device__ __host__ inline
+#define KOKKOS_IMPL_FUNCTION __device__ __host__
+#define KOKKOS_IMPL_HOST_FUNCTION __host__
+#define KOKKOS_IMPL_DEVICE_FUNCTION __device__
+#if defined(KOKKOS_COMPILER_NVCC)
+#define KOKKOS_INLINE_FUNCTION_DELETED inline
+#else
+#define KOKKOS_INLINE_FUNCTION_DELETED __device__ __host__ inline
+#endif
+#if (CUDA_VERSION < 10000)
+#define KOKKOS_DEFAULTED_FUNCTION __host__ __device__ inline
+#else
+#define KOKKOS_DEFAULTED_FUNCTION inline
+#endif
+
+#if (CUDA_VERSION >= 10000)
+#define KOKKOS_CUDA_ENABLE_GRAPHS
+#endif
+
+#endif /* KOKKOS_CUDA_SETUP_HPP_ */
diff --git a/bundled/kokkos-3.7.00/core/src/setup/Kokkos_Setup_HIP.hpp b/bundled/kokkos-3.7.00/core/src/setup/Kokkos_Setup_HIP.hpp
new file mode 100644 (file)
index 0000000..f1df2f8
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SETUP_HIP_HPP_
+#define KOKKOS_SETUP_HIP_HPP_
+
+#if defined(KOKKOS_ENABLE_HIP)
+
+#define KOKKOS_IMPL_HIP_CLANG_WORKAROUND
+
+#define HIP_ENABLE_PRINTF
+#include <hip/hip_runtime.h>
+#include <hip/hip_runtime_api.h>
+
+#define KOKKOS_LAMBDA [=] __host__ __device__
+#if defined(KOKKOS_ENABLE_CXX17) || defined(KOKKOS_ENABLE_CXX20)
+#define KOKKOS_CLASS_LAMBDA [ =, *this ] __host__ __device__
+#endif
+
+#define KOKKOS_IMPL_FORCEINLINE_FUNCTION __device__ __host__ __forceinline__
+#define KOKKOS_IMPL_INLINE_FUNCTION __device__ __host__ inline
+#define KOKKOS_DEFAULTED_FUNCTION __device__ __host__ inline
+#define KOKKOS_INLINE_FUNCTION_DELETED __device__ __host__ inline
+#define KOKKOS_IMPL_FUNCTION __device__ __host__
+#define KOKKOS_IMPL_HOST_FUNCTION __host__
+#define KOKKOS_IMPL_DEVICE_FUNCTION __device__
+
+#endif  // #if defined( KOKKOS_ENABLE_HIP )
+
+#endif
diff --git a/bundled/kokkos-3.7.00/core/src/setup/Kokkos_Setup_SYCL.hpp b/bundled/kokkos-3.7.00/core/src/setup/Kokkos_Setup_SYCL.hpp
new file mode 100644 (file)
index 0000000..b203e9a
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SETUP_SYCL_HPP_
+#define KOKKOS_SETUP_SYCL_HPP_
+
+// FIXME_SYCL the fallback assert is temporarily disabled by default in the
+// compiler, so we need to force it
+#ifndef SYCL_ENABLE_FALLBACK_ASSERT
+#define SYCL_ENABLE_FALLBACK_ASSERT
+#endif
+#ifndef SYCL_FALLBACK_ASSERT
+#define SYCL_FALLBACK_ASSERT 1
+#endif
+
+#include <CL/sycl.hpp>
+
+#ifdef __SYCL_DEVICE_ONLY__
+#define KOKKOS_IMPL_DO_NOT_USE_PRINTF(format, ...)                \
+  do {                                                            \
+    const __attribute__((opencl_constant)) char fmt[] = (format); \
+    sycl::ext::oneapi::experimental::printf(fmt, ##__VA_ARGS__);  \
+  } while (0)
+#endif
+
+#endif
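
The printf wrapper above exists because SYCL device code requires the format string to live in the `opencl_constant` address space. A sketch of how Kokkos' own device code would invoke it (the macro is internal, not user-facing; `i` is an assumed loop index):

    // inside a SYCL device kernel body
    KOKKOS_IMPL_DO_NOT_USE_PRINTF("i = %d\n", i);
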
diff --git a/bundled/kokkos-3.7.00/core/src/traits/Kokkos_ExecutionSpaceTrait.hpp b/bundled/kokkos-3.7.00/core/src/traits/Kokkos_ExecutionSpaceTrait.hpp
new file mode 100644 (file)
index 0000000..cb28086
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_EXECUTIONSPACETRAIT_HPP
+#define KOKKOS_KOKKOS_EXECUTIONSPACETRAIT_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Concepts.hpp>  // is_execution_space
+#include <traits/Kokkos_PolicyTraitAdaptor.hpp>
+#include <traits/Kokkos_Traits_fwd.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="trait specification"> {{{1
+
+template <class T>
+struct show_extra_execution_space_erroneously_given_to_execution_policy;
+template <>
+struct show_extra_execution_space_erroneously_given_to_execution_policy<void> {
+};
+struct ExecutionSpaceTrait : TraitSpecificationBase<ExecutionSpaceTrait> {
+  struct base_traits {
+    static constexpr auto execution_space_is_defaulted = true;
+
+    using execution_space = Kokkos::DefaultExecutionSpace;
+    KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND
+  };
+  template <class T>
+  using trait_matches_specification = Kokkos::is_execution_space<T>;
+  template <class ExecSpace, class AnalyzeNextTrait>
+  struct mixin_matching_trait : AnalyzeNextTrait {
+    using base_t = AnalyzeNextTrait;
+    using base_t::base_t;
+
+    static constexpr auto show_execution_space_error_in_compilation_message =
+        show_extra_execution_space_erroneously_given_to_execution_policy<
+            std::conditional_t<base_t::execution_space_is_defaulted, void,
+                               typename base_t::execution_space>>{};
+    static_assert(base_t::execution_space_is_defaulted,
+                  "Kokkos Error: More than one execution space given. Search "
+                  "compiler output for 'show_extra_execution_space' to see the "
+                  "type of the errant tag.");
+
+    static constexpr auto execution_space_is_defaulted = false;
+
+    using execution_space = ExecSpace;
+  };
+};
+
+// </editor-fold> end trait specification }}}1
+//==============================================================================
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_EXECUTIONSPACETRAIT_HPP
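
To see the trait at the user level (a sketch, not part of this commit): the first execution space in a policy's parameter pack becomes `execution_space`; a second one trips the static_assert above, and the otherwise-unused `show_execution_space_error_in_compilation_message` constant forces the compiler diagnostic to print the errant type.

    #include <Kokkos_Core.hpp>
    #include <type_traits>

    // One execution space: fine.
    using P1 = Kokkos::RangePolicy<Kokkos::DefaultExecutionSpace>;
    static_assert(std::is_same<P1::execution_space,
                               Kokkos::DefaultExecutionSpace>::value, "");

    // Two execution spaces: fails to compile, and the diagnostic names
    // show_extra_execution_space_erroneously_given_to_execution_policy<Space>
    // instantiated with the second (errant) space type:
    // using P2 = Kokkos::RangePolicy<Kokkos::Serial,
    //                                Kokkos::DefaultExecutionSpace>;
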
diff --git a/bundled/kokkos-3.7.00/core/src/traits/Kokkos_GraphKernelTrait.hpp b/bundled/kokkos-3.7.00/core/src/traits/Kokkos_GraphKernelTrait.hpp
new file mode 100644 (file)
index 0000000..b16a777
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_GRAPHKERNELTRAIT_HPP
+#define KOKKOS_KOKKOS_GRAPHKERNELTRAIT_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <traits/Kokkos_PolicyTraitAdaptor.hpp>
+#include <impl/Kokkos_GraphImpl_fwd.hpp>  // IsGraphKernelTag
+#include <traits/Kokkos_Traits_fwd.hpp>
+#include <impl/Kokkos_Utilities.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="trait specification"> {{{1
+
+struct GraphKernelTrait : TraitSpecificationBase<GraphKernelTrait> {
+  struct base_traits {
+    using is_graph_kernel = std::false_type;
+    KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND
+  };
+  template <class, class AnalyzeNextTrait>
+  struct mixin_matching_trait : AnalyzeNextTrait {
+    using base_t = AnalyzeNextTrait;
+    using base_t::base_t;
+    using is_graph_kernel = std::true_type;
+  };
+  template <class T>
+  using trait_matches_specification = std::is_same<T, IsGraphKernelTag>;
+};
+
+// </editor-fold> end trait specification }}}1
+//==============================================================================
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_GRAPHKERNELTRAIT_HPP
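
A sketch of the internal mechanism (an assumption about usage, mirroring how the graph implementation tags kernels; this is not user-facing API): adding `IsGraphKernelTag` to a policy through the trait specification flips `is_graph_kernel` from `std::false_type` to `std::true_type`.

    #include <Kokkos_Core.hpp>

    using P  = Kokkos::RangePolicy<>;
    using GP = Kokkos::Impl::GraphKernelTrait::policy_with_trait<
        P, Kokkos::Impl::IsGraphKernelTag>;
    static_assert(GP::is_graph_kernel::value, "policy is now a graph kernel");
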
diff --git a/bundled/kokkos-3.7.00/core/src/traits/Kokkos_IndexTypeTrait.hpp b/bundled/kokkos-3.7.00/core/src/traits/Kokkos_IndexTypeTrait.hpp
new file mode 100644 (file)
index 0000000..57f74d5
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_INDEXTYPETRAIT_HPP
+#define KOKKOS_KOKKOS_INDEXTYPETRAIT_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Concepts.hpp>  // IndexType
+#include <traits/Kokkos_PolicyTraitAdaptor.hpp>
+#include <traits/Kokkos_Traits_fwd.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+template <class Trait, class AnalyzeNextTrait>
+struct IndexTypePolicyMixin;
+
+//==============================================================================
+// <editor-fold desc="trait specification"> {{{1
+
+template <class T>
+struct show_extra_index_type_erroneously_given_to_execution_policy;
+template <>
+struct show_extra_index_type_erroneously_given_to_execution_policy<void> {};
+struct IndexTypeTrait : TraitSpecificationBase<IndexTypeTrait> {
+  struct base_traits {
+    static constexpr bool index_type_is_defaulted = true;
+    using index_type = dependent_policy_trait_default;
+    KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND
+  };
+  template <class IdxType, class AnalyzeNextTrait>
+  using mixin_matching_trait = IndexTypePolicyMixin<IdxType, AnalyzeNextTrait>;
+};
+
+// </editor-fold> end trait specification }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="IndexTypePolicyMixin specializations"> {{{1
+
+// Index type given as IndexType template
+template <class IntegralIndexType, class AnalyzeNextTrait>
+struct IndexTypePolicyMixin<Kokkos::IndexType<IntegralIndexType>,
+                            AnalyzeNextTrait> : AnalyzeNextTrait {
+  using base_t = AnalyzeNextTrait;
+  using base_t::base_t;
+  static constexpr auto show_index_type_error_in_compilation_message =
+      show_extra_index_type_erroneously_given_to_execution_policy<
+          std::conditional_t<base_t::index_type_is_defaulted, void,
+                             typename base_t::schedule_type>>{};
+  static_assert(base_t::index_type_is_defaulted,
+                "Kokkos Error: More than one index type given. Search "
+                "compiler output for 'show_extra_index_type' to see the "
+                "type of the errant tag.");
+  static constexpr bool index_type_is_defaulted = false;
+  using index_type = Kokkos::IndexType<IntegralIndexType>;
+};
+
+// IndexType given as an integral type directly (the matcher already checks
+// this, so we don't have to specialize to re-check it here)
+template <class IntegralIndexType, class AnalyzeNextTrait>
+struct IndexTypePolicyMixin : AnalyzeNextTrait {
+  using base_t = AnalyzeNextTrait;
+  using base_t::base_t;
+  static constexpr auto show_index_type_error_in_compilation_message =
+      show_extra_index_type_erroneously_given_to_execution_policy<
+          std::conditional_t<base_t::index_type_is_defaulted, void,
+                             typename base_t::schedule_type>>{};
+  static_assert(base_t::index_type_is_defaulted,
+                "Kokkos Error: More than one index type given. Search "
+                "compiler output for 'show_extra_index_type' to see the "
+                "type of the errant tag.");
+  static_assert(std::is_integral<IntegralIndexType>::value, "");
+  static constexpr bool index_type_is_defaulted = false;
+  using index_type = Kokkos::IndexType<IntegralIndexType>;
+};
+
+// </editor-fold> end IndexTypePolicyMixin specializations }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="PolicyTraitMatcher specialization"> {{{1
+
+template <class IntegralIndexType>
+struct PolicyTraitMatcher<IndexTypeTrait, IndexType<IntegralIndexType>>
+    : std::true_type {};
+
+template <class IntegralIndexType>
+struct PolicyTraitMatcher<
+    IndexTypeTrait, IntegralIndexType,
+    std::enable_if_t<std::is_integral<IntegralIndexType>::value>>
+    : std::true_type {};
+
+// </editor-fold> end PolicyTraitMatcher specialization }}}1
+//==============================================================================
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_INDEXTYPETRAIT_HPP
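
Both spellings handled by the two mixin specializations are available to users (a sketch, not part of this commit; accessing `P1::index_type` assumes the policy exposes its analyzed traits, as RangePolicy does):

    #include <Kokkos_Core.hpp>
    #include <type_traits>

    // Index type given through the IndexType wrapper ...
    using P1 = Kokkos::RangePolicy<Kokkos::IndexType<int>>;
    // ... or as a bare integral type, matched by the enable_if
    // specialization of PolicyTraitMatcher above.
    using P2 = Kokkos::RangePolicy<long>;

    static_assert(std::is_same<P1::index_type,
                               Kokkos::IndexType<int>>::value, "");
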
diff --git a/bundled/kokkos-3.7.00/core/src/traits/Kokkos_IterationPatternTrait.hpp b/bundled/kokkos-3.7.00/core/src/traits/Kokkos_IterationPatternTrait.hpp
new file mode 100644 (file)
index 0000000..3c8ba47
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_ITERATIONPATTERNTRAIT_HPP
+#define KOKKOS_KOKKOS_ITERATIONPATTERNTRAIT_HPP
+
+#include <Kokkos_Concepts.hpp>                   // is_iteration_pattern
+#include <traits/Kokkos_PolicyTraitAdaptor.hpp>  // TraitSpecificationBase
+#include <Kokkos_Rank.hpp>                       // Rank
+#include <Kokkos_Layout.hpp>                     // Iterate
+#include <type_traits>                           // is_void
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="trait specification"> {{{1
+
+template <class T>
+struct show_extra_iteration_pattern_erroneously_given_to_execution_policy;
+template <>
+struct show_extra_iteration_pattern_erroneously_given_to_execution_policy<
+    void> {};
+struct IterationPatternTrait : TraitSpecificationBase<IterationPatternTrait> {
+  struct base_traits {
+    using iteration_pattern = void;  // TODO set default iteration pattern
+    KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND
+  };
+  template <class IterPattern, class AnalyzeNextTrait>
+  struct mixin_matching_trait : AnalyzeNextTrait {
+    using base_t = AnalyzeNextTrait;
+    using base_t::base_t;
+    static constexpr auto show_iteration_pattern_error_in_compilation_message =
+        show_extra_iteration_pattern_erroneously_given_to_execution_policy<
+            typename base_t::iteration_pattern>{};
+    static_assert(
+        std::is_void<typename base_t::iteration_pattern>::value,
+        "Kokkos Error: More than one index type given. Search "
+        "compiler output for 'show_extra_iteration_pattern' to see the "
+        "type of the errant tag.");
+    using iteration_pattern = IterPattern;
+  };
+};
+
+// </editor-fold> end trait specification }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="PolicyTraitMatcher specialization"> {{{1
+
+template <unsigned N, Iterate OuterDir, Iterate InnerDir>
+struct PolicyTraitMatcher<IterationPatternTrait, Rank<N, OuterDir, InnerDir>>
+    : std::true_type {};
+
+// </editor-fold> end PolicyTraitMatcher specialization }}}1
+//==============================================================================
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_ITERATIONPATTERNTRAIT_HPP
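
At the user level the iteration pattern arrives as a `Rank` argument to `MDRangePolicy` (a sketch, not part of this commit; `m`, `n`, and the kernel body are assumptions):

    #include <Kokkos_Core.hpp>

    void sweep(int m, int n) {
      using Pattern =
          Kokkos::Rank<2, Kokkos::Iterate::Left, Kokkos::Iterate::Right>;
      Kokkos::MDRangePolicy<Pattern> policy({0, 0}, {m, n});
      Kokkos::parallel_for(
          "sweep", policy, KOKKOS_LAMBDA(const int i, const int j) {
            (void)i; (void)j;  // kernel body over (i, j)
          });
    }
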
diff --git a/bundled/kokkos-3.7.00/core/src/traits/Kokkos_LaunchBoundsTrait.hpp b/bundled/kokkos-3.7.00/core/src/traits/Kokkos_LaunchBoundsTrait.hpp
new file mode 100644 (file)
index 0000000..c20a883
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_LAUNCHBOUNDSTRAIT_HPP
+#define KOKKOS_KOKKOS_LAUNCHBOUNDSTRAIT_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Concepts.hpp>  // LaunchBounds
+#include <traits/Kokkos_PolicyTraitAdaptor.hpp>
+#include <traits/Kokkos_Traits_fwd.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="trait specification"> {{{1
+
+struct LaunchBoundsTrait : TraitSpecificationBase<LaunchBoundsTrait> {
+  struct base_traits {
+    static constexpr bool launch_bounds_is_defaulted = true;
+
+    using launch_bounds = LaunchBounds<>;
+    KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND
+  };
+  template <class LaunchBoundParam, class AnalyzeNextTrait>
+  struct mixin_matching_trait : AnalyzeNextTrait {
+    using base_t = AnalyzeNextTrait;
+    using base_t::base_t;
+
+    static constexpr bool launch_bounds_is_defaulted = false;
+
+    static_assert(base_t::launch_bounds_is_defaulted,
+                  "Kokkos Error: More than one launch_bounds given");
+
+    using launch_bounds = LaunchBoundParam;
+  };
+};
+
+// </editor-fold> end trait specification }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="PolicyTraitMatcher specialization"> {{{1
+
+template <unsigned int maxT, unsigned int minB>
+struct PolicyTraitMatcher<LaunchBoundsTrait, LaunchBounds<maxT, minB>>
+    : std::true_type {};
+
+// </editor-fold> end PolicyTraitMatcher specialization }}}1
+//==============================================================================
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_LAUNCHBOUNDSTRAIT_HPP
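
A usage sketch (not part of this commit; the bound values are illustrative, not recommendations): `LaunchBounds<MaxThreadsPerBlock, MinBlocksPerSM>` is consumed by the GPU backends to constrain the kernel launch configuration.

    #include <Kokkos_Core.hpp>

    void run(int n) {
      // At most 256 threads per block, at least 4 resident blocks per SM.
      using P = Kokkos::RangePolicy<Kokkos::LaunchBounds<256, 4>>;
      Kokkos::parallel_for(
          "bounded", P(0, n), KOKKOS_LAMBDA(const int i) { (void)i; });
    }
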
diff --git a/bundled/kokkos-3.7.00/core/src/traits/Kokkos_OccupancyControlTrait.hpp b/bundled/kokkos-3.7.00/core/src/traits/Kokkos_OccupancyControlTrait.hpp
new file mode 100644 (file)
index 0000000..b3328f8
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_OCCUPANCYCONTROLTRAIT_HPP
+#define KOKKOS_KOKKOS_OCCUPANCYCONTROLTRAIT_HPP
+
+#include <impl/Kokkos_Error.hpp>  // KOKKOS_EXPECTS macro
+
+#include <traits/Kokkos_PolicyTraitAdaptor.hpp>
+
+#include <traits/Kokkos_Traits_fwd.hpp>
+
+namespace Kokkos {
+
+namespace Experimental {
+
+//==============================================================================
+// <editor-fold desc="Occupancy control user interface"> {{{1
+
+struct MaximizeOccupancy;
+
+struct DesiredOccupancy {
+  int m_occ = 100;
+  explicit constexpr DesiredOccupancy(int occ) : m_occ(occ) {
+    KOKKOS_EXPECTS(0 <= occ && occ <= 100);
+  }
+  explicit constexpr operator int() const { return m_occ; }
+  constexpr int value() const { return m_occ; }
+  DesiredOccupancy() = default;
+  explicit DesiredOccupancy(MaximizeOccupancy const&) : DesiredOccupancy() {}
+};
+
+struct MaximizeOccupancy {
+  explicit MaximizeOccupancy() = default;
+};
+
+// </editor-fold> end Occupancy control user interface }}}1
+//==============================================================================
+
+}  // end namespace Experimental
+
+namespace Impl {
+
+template <class Policy, class AnalyzeNextTrait>
+struct OccupancyControlPolicyMixin;
+
+//==============================================================================
+// <editor-fold desc="Occupancy control trait specification"> {{{1
+
+struct OccupancyControlTrait : TraitSpecificationBase<OccupancyControlTrait> {
+  struct base_traits {
+    using occupancy_control = Kokkos::Experimental::MaximizeOccupancy;
+    static constexpr bool experimental_contains_desired_occupancy = false;
+    // Default access occupancy_control, for when it is the (stateless) default
+    static constexpr occupancy_control impl_get_occupancy_control() {
+      return occupancy_control{};
+    }
+    KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND
+  };
+  template <class OccControl, class AnalyzeNextTrait>
+  using mixin_matching_trait =
+      OccupancyControlPolicyMixin<OccControl, AnalyzeNextTrait>;
+  template <class T>
+  using trait_matches_specification = std::integral_constant<
+      bool,
+      std::is_same<T, Kokkos::Experimental::DesiredOccupancy>::value ||
+          std::is_same<T, Kokkos::Experimental::MaximizeOccupancy>::value>;
+};
+
+// </editor-fold> end Occupancy control trait specification }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="OccupancyControlPolicyMixin specializations"> {{{1
+
+template <class AnalyzeNextTrait>
+struct OccupancyControlPolicyMixin<Kokkos::Experimental::DesiredOccupancy,
+                                   AnalyzeNextTrait> : AnalyzeNextTrait {
+  using base_t            = AnalyzeNextTrait;
+  using occupancy_control = Kokkos::Experimental::DesiredOccupancy;
+  static constexpr bool experimental_contains_desired_occupancy = true;
+
+  // Treat this as private, but make it public so that MSVC will still treat
+  // this as a standard layout class and make it the right size: storage for a
+  // stateful desired occupancy
+  //   private:
+  occupancy_control m_desired_occupancy = occupancy_control{};
+
+  OccupancyControlPolicyMixin() = default;
+  // Converting constructor
+  // Just rely on the convertibility of occupancy_control to transfer the data
+  template <class Other>
+  OccupancyControlPolicyMixin(ExecPolicyTraitsWithDefaults<Other> const& other)
+      : base_t(other),
+        m_desired_occupancy(other.impl_get_occupancy_control()) {}
+
+  // Converting assignment operator
+  // Just rely on the convertibility of occupancy_control to transfer the data
+  template <class Other>
+  OccupancyControlPolicyMixin& operator=(
+      ExecPolicyTraitsWithDefaults<Other> const& other) {
+    *static_cast<base_t*>(this) = other;
+    this->impl_set_desired_occupancy(
+        occupancy_control{other.impl_get_occupancy_control()});
+    return *this;
+  }
+
+  // Access to occupancy control instance, usable in generic context
+  constexpr occupancy_control impl_get_occupancy_control() const {
+    return m_desired_occupancy;
+  }
+
+  // Access to desired occupancy (getter and setter)
+  Kokkos::Experimental::DesiredOccupancy impl_get_desired_occupancy() const {
+    return m_desired_occupancy;
+  }
+
+  void impl_set_desired_occupancy(occupancy_control desired_occupancy) {
+    m_desired_occupancy = desired_occupancy;
+  }
+};
+
+template <class AnalyzeNextTrait>
+struct OccupancyControlPolicyMixin<Kokkos::Experimental::MaximizeOccupancy,
+                                   AnalyzeNextTrait> : AnalyzeNextTrait {
+  using base_t = AnalyzeNextTrait;
+  using base_t::base_t;
+  using occupancy_control = Kokkos::Experimental::MaximizeOccupancy;
+  static constexpr bool experimental_contains_desired_occupancy = false;
+};
+
+// </editor-fold> end OccupancyControlPolicyMixin specializations }}}1
+//==============================================================================
+
+}  // end namespace Impl
+
+namespace Experimental {
+
+//==============================================================================
+// <editor-fold desc="User interface"> {{{1
+
+template <typename Policy>
+auto prefer(Policy const& p, DesiredOccupancy occ) {
+  using new_policy_t =
+      Kokkos::Impl::OccupancyControlTrait::policy_with_trait<Policy,
+                                                             DesiredOccupancy>;
+  new_policy_t pwo{p};
+  pwo.impl_set_desired_occupancy(occ);
+  return pwo;
+}
+
+template <typename Policy>
+constexpr auto prefer(Policy const& p, MaximizeOccupancy) {
+  static_assert(Kokkos::is_execution_policy<Policy>::value, "");
+  using new_policy_t =
+      Kokkos::Impl::OccupancyControlTrait::policy_with_trait<Policy,
+                                                             MaximizeOccupancy>;
+  return new_policy_t{p};
+}
+
+// </editor-fold> end User interface }}}1
+//==============================================================================
+
+}  // end namespace Experimental
+
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_OCCUPANCYCONTROLTRAIT_HPP
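
The `prefer` overloads above are the intended entry point (a sketch, not part of this commit; `functor` and `n` are assumptions): wrapping a policy with `DesiredOccupancy` stores the stateful occupancy in the mixin, which backends may consult when choosing a launch configuration.

    #include <Kokkos_Core.hpp>

    template <class Functor>
    void run(int n, Functor functor) {
      auto p = Kokkos::Experimental::prefer(
          Kokkos::RangePolicy<>(0, n),
          Kokkos::Experimental::DesiredOccupancy{33});  // aim for ~33% occupancy
      Kokkos::parallel_for("low_occupancy", p, functor);
    }
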
diff --git a/bundled/kokkos-3.7.00/core/src/traits/Kokkos_PolicyTraitAdaptor.hpp b/bundled/kokkos-3.7.00/core/src/traits/Kokkos_PolicyTraitAdaptor.hpp
new file mode 100644 (file)
index 0000000..e500dd4
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#include <impl/Kokkos_Utilities.hpp>  // type_list
+
+#include <traits/Kokkos_Traits_fwd.hpp>
+
+#ifndef KOKKOS_KOKKOS_POLICYTRAITADAPTOR_HPP
+#define KOKKOS_KOKKOS_POLICYTRAITADAPTOR_HPP
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="Adapter for replacing/adding a trait"> {{{1
+
+//------------------------------------------------------------------------------
+
+// General strategy: given a TraitSpecification, go through the entries in the
+// parameter pack of the policy template and find the first one that returns
+// `true` for the nested `trait_matches_specification` variable template. If
+// that nested variable template is not found these overloads should be safely
+// ignored, and the trait can specialize PolicyTraitAdapterImpl to get the
+// desired behavior.
+
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="PolicyTraitMatcher"> {{{2
+
+// To handle the WorkTag case, we need more than just a predicate; we need
+// something that we can default to in the unspecialized case, just like we
+// do for AnalyzeExecPolicy
+template <class TraitSpec, class Trait, class Enable = void>
+struct PolicyTraitMatcher : std::false_type {};
+
+template <class TraitSpec, class Trait>
+struct PolicyTraitMatcher<
+    TraitSpec, Trait,
+    std::enable_if_t<
+        TraitSpec::template trait_matches_specification<Trait>::value>>
+    : std::true_type {};
+
+// </editor-fold> end PolicyTraitMatcher }}}2
+//------------------------------------------------------------------------------
+
+//------------------------------------------------------------------------------
+// <editor-fold desc="PolicyTraitAdaptorImpl specializations"> {{{2
+
+// Matching version, replace the trait
+template <class TraitSpec, template <class...> class PolicyTemplate,
+          class... ProcessedTraits, class MatchingTrait,
+          class... ToProcessTraits, class NewTrait>
+struct PolicyTraitAdaptorImpl<
+    TraitSpec, PolicyTemplate, type_list<ProcessedTraits...>,
+    type_list<MatchingTrait, ToProcessTraits...>, NewTrait,
+    std::enable_if_t<PolicyTraitMatcher<TraitSpec, MatchingTrait>::value>> {
+  static_assert(PolicyTraitMatcher<TraitSpec, NewTrait>::value, "");
+  using type = PolicyTemplate<ProcessedTraits..., NewTrait, ToProcessTraits...>;
+};
+
+// Non-matching version, check the next option
+template <class TraitSpec, template <class...> class PolicyTemplate,
+          class... ProcessedTraits, class NonMatchingTrait,
+          class... ToProcessTraits, class NewTrait>
+struct PolicyTraitAdaptorImpl<
+    TraitSpec, PolicyTemplate, type_list<ProcessedTraits...>,
+    type_list<NonMatchingTrait, ToProcessTraits...>, NewTrait,
+    std::enable_if_t<!PolicyTraitMatcher<TraitSpec, NonMatchingTrait>::value>> {
+  using type = typename PolicyTraitAdaptorImpl<
+      TraitSpec, PolicyTemplate,
+      type_list<ProcessedTraits..., NonMatchingTrait>,
+      type_list<ToProcessTraits...>, NewTrait>::type;
+};
+
+// Base case: no matches found; just add the trait to the end of the list
+template <class TraitSpec, template <class...> class PolicyTemplate,
+          class... ProcessedTraits, class NewTrait>
+struct PolicyTraitAdaptorImpl<TraitSpec, PolicyTemplate,
+                              type_list<ProcessedTraits...>, type_list<>,
+                              NewTrait> {
+  static_assert(PolicyTraitMatcher<TraitSpec, NewTrait>::value, "");
+  using type = PolicyTemplate<ProcessedTraits..., NewTrait>;
+};
+
+// </editor-fold> end PolicyTraitAdaptorImpl specializations }}}2
+//------------------------------------------------------------------------------
+
+template <class TraitSpec, template <class...> class PolicyTemplate,
+          class... Traits, class NewTrait>
+struct PolicyTraitAdaptor<TraitSpec, PolicyTemplate<Traits...>, NewTrait>
+    : PolicyTraitAdaptorImpl<TraitSpec, PolicyTemplate, type_list<>,
+                             type_list<Traits...>, NewTrait> {};
+
+// </editor-fold> end Adapter for replacing/adding a trait }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="CRTP Base class for trait specifications"> {{{1
+
+template <class TraitSpec>
+struct TraitSpecificationBase {
+  using trait_specification = TraitSpec;
+  template <class Policy, class Trait>
+  using policy_with_trait =
+      typename PolicyTraitAdaptor<TraitSpec, Policy, Trait>::type;
+};
+
+// </editor-fold> end CRTP Base class for trait specifications }}}1
+//==============================================================================
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_POLICYTRAITADAPTOR_HPP
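
The net effect of the machinery above, in a sketch (not part of this commit; it uses `ScheduleTrait`, defined later in this diff): `policy_with_trait` rebuilds the policy's parameter pack, replacing the first entry that matches the specification, or appending the new trait if none matches.

    #include <Kokkos_Core.hpp>
    #include <type_traits>

    using Base    = Kokkos::RangePolicy<Kokkos::Schedule<Kokkos::Static>>;
    // Replaces the matching Schedule entry in place:
    using Swapped = Kokkos::Impl::ScheduleTrait::policy_with_trait<
        Base, Kokkos::Schedule<Kokkos::Dynamic>>;
    static_assert(
        std::is_same<
            Swapped,
            Kokkos::RangePolicy<Kokkos::Schedule<Kokkos::Dynamic>>>::value,
        "");
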
diff --git a/bundled/kokkos-3.7.00/core/src/traits/Kokkos_PolicyTraitMatcher.hpp b/bundled/kokkos-3.7.00/core/src/traits/Kokkos_PolicyTraitMatcher.hpp
new file mode 100644 (file)
index 0000000..3192732
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#include <impl/Kokkos_Utilities.hpp>  // type_list
+
+#include <traits/Kokkos_Traits_fwd.hpp>
+
+#ifndef KOKKOS_KOKKOS_POLICYTRAITMATCHER_HPP
+#define KOKKOS_KOKKOS_POLICYTRAITMATCHER_HPP
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="PolicyTraitMatcher"> {{{1
+
+// To handle the WorkTag case, we need more than just a predicate; we need
+// something that we can default to in the unspecialized case, just like we
+// do for AnalyzeExecPolicy
+template <class TraitSpec, class Trait, class Enable = void>
+struct PolicyTraitMatcher : std::false_type {};
+
+template <class TraitSpec, class Trait>
+struct PolicyTraitMatcher<
+    TraitSpec, Trait,
+    std::enable_if_t<
+        TraitSpec::template trait_matches_specification<Trait>::value>>
+    : std::true_type {};
+
+// </editor-fold> end PolicyTraitMatcher }}}1
+//==============================================================================
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_POLICYTRAITMATCHER_HPP
diff --git a/bundled/kokkos-3.7.00/core/src/traits/Kokkos_ScheduleTrait.hpp b/bundled/kokkos-3.7.00/core/src/traits/Kokkos_ScheduleTrait.hpp
new file mode 100644 (file)
index 0000000..311ab1a
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_SCHEDULETRAIT_HPP
+#define KOKKOS_KOKKOS_SCHEDULETRAIT_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Concepts.hpp>  // is_schedule_type, Schedule
+#include <traits/Kokkos_PolicyTraitAdaptor.hpp>
+#include <traits/Kokkos_Traits_fwd.hpp>
+
+namespace Kokkos {
+
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="trait specification"> {{{1
+
+template <class T>
+struct show_extra_schedule_type_erroneously_given_to_execution_policy;
+template <>
+struct show_extra_schedule_type_erroneously_given_to_execution_policy<void> {};
+struct ScheduleTrait : TraitSpecificationBase<ScheduleTrait> {
+  struct base_traits {
+    static constexpr auto schedule_type_is_defaulted = true;
+
+    using schedule_type = Schedule<Static>;
+    KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND
+  };
+  template <class Sched, class AnalyzeNextTrait>
+  struct mixin_matching_trait : AnalyzeNextTrait {
+    using base_t = AnalyzeNextTrait;
+    using base_t::base_t;
+    using schedule_type = Sched;
+    static constexpr auto show_schedule_type_error_in_compilation_message =
+        show_extra_schedule_type_erroneously_given_to_execution_policy<
+            std::conditional_t<base_t::schedule_type_is_defaulted, void,
+                               typename base_t::schedule_type>>{};
+    static_assert(base_t::schedule_type_is_defaulted,
+                  "Kokkos Error: More than one schedule type given. Search "
+                  "compiler output for 'show_extra_schedule_type' to see the "
+                  "type of the errant tag.");
+    static constexpr bool schedule_type_is_defaulted = false;
+  };
+};
+
+// </editor-fold> end trait specification }}}1
+//==============================================================================
+
+//==============================================================================
+// <editor-fold desc="PolicyTraitMatcher specialization"> {{{1
+
+template <class Sched>
+struct PolicyTraitMatcher<ScheduleTrait, Schedule<Sched>> : std::true_type {};
+
+// </editor-fold> end PolicyTraitMatcher specialization }}}1
+//==============================================================================
+
+}  // end namespace Impl
+
+namespace Experimental {
+
+//==============================================================================
+// <editor-fold desc="User interface"> {{{1
+
+template <class Policy, class ScheduleType>
+constexpr auto require(Policy const& p, Kokkos::Schedule<ScheduleType>) {
+  static_assert(Kokkos::is_execution_policy<Policy>::value, "");
+  using new_policy_t = Kokkos::Impl::ScheduleTrait::policy_with_trait<
+      Policy, Kokkos::Schedule<ScheduleType>>;
+  return new_policy_t{p};
+}
+
+// </editor-fold> end User interface }}}1
+//==============================================================================
+
+}  // end namespace Experimental
+
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_SCHEDULETRAIT_HPP
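
A user-level sketch of `require` (not part of this commit; `n` and the kernel body are assumptions): requesting dynamic scheduling rebuilds the policy type with `Schedule<Dynamic>` substituted via `policy_with_trait`.

    #include <Kokkos_Core.hpp>

    void run(int n) {
      auto p = Kokkos::Experimental::require(
          Kokkos::RangePolicy<>(0, n), Kokkos::Schedule<Kokkos::Dynamic>());
      Kokkos::parallel_for(
          "dynamic", p, KOKKOS_LAMBDA(const int i) { (void)i; });
    }
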
diff --git a/bundled/kokkos-3.7.00/core/src/traits/Kokkos_Traits_fwd.hpp b/bundled/kokkos-3.7.00/core/src/traits/Kokkos_Traits_fwd.hpp
new file mode 100644 (file)
index 0000000..c6b4fe4
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_TRAITS_FWD_HPP
+#define KOKKOS_KOKKOS_TRAITS_FWD_HPP
+
+// Without this, the CUDA side does proper EBO while MSVC doesn't, leading to
+// mismatched sizes of the driver objects (e.g. CudaParallel) and hence to
+// illegal memory accesses etc. on device
+#if defined(_WIN32) && defined(KOKKOS_ENABLE_CUDA)
+#define KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND char dummy;
+#else
+#define KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND
+#endif
+
+namespace Kokkos {
+namespace Impl {
+
+template <class Enable, class... TraitsList>
+struct AnalyzeExecPolicy;
+
+template <class Enable, class TraitSpecList, class... Traits>
+struct AnalyzeExecPolicyUseMatcher;
+
+template <class AnalysisResults>
+struct ExecPolicyTraitsWithDefaults;
+
+template <class TraitSpec, class Trait, class Enable>
+struct PolicyTraitMatcher;
+
+template <class TraitSpec, template <class...> class PolicyTemplate,
+          class AlreadyProcessedList, class ToProcessList, class NewTrait,
+          class Enable = void>
+struct PolicyTraitAdaptorImpl;
+
+template <class TraitSpec, class Policy, class NewTrait>
+struct PolicyTraitAdaptor;
+
+// A tag class for dependent defaults that must be handled by the
+// ExecPolicyTraitsWithDefaults wrapper, since their defaults depend on other
+// traits
+struct dependent_policy_trait_default;
+
+//==============================================================================
+// <editor-fold desc="Execution policy trait specifications"> {{{1
+
+struct ExecutionSpaceTrait;
+struct IndexTypeTrait;
+struct ScheduleTrait;
+struct IterationPatternTrait;
+struct WorkItemPropertyTrait;
+struct LaunchBoundsTrait;
+struct OccupancyControlTrait;
+struct GraphKernelTrait;
+struct WorkTagTrait;
+
+// Keep these sorted by frequency of use to reduce compilation time
+//
+// clang-format off
+using execution_policy_trait_specifications =
+  type_list<
+    ExecutionSpaceTrait,
+    IndexTypeTrait,
+    ScheduleTrait,
+    IterationPatternTrait,
+    WorkItemPropertyTrait,
+    LaunchBoundsTrait,
+    OccupancyControlTrait,
+    GraphKernelTrait,
+    // This one has to be last, unfortunately:
+    WorkTagTrait
+  >;
+// clang-format on
+
+// </editor-fold> end Execution policy trait specifications }}}1
+//==============================================================================
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_TRAITS_FWD_HPP
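
Why the `char dummy;` matters, in a plain C++ sketch independent of Kokkos (not part of this commit): under the empty-base optimization an empty base contributes no storage, so if host and device compilers disagree about applying it, the same policy type ends up with two different layouts.

    struct Empty {};
    struct Policy : Empty { int payload; };

    // On common ABIs the empty base is optimized away entirely:
    static_assert(sizeof(Policy) == sizeof(int), "EBO applied");

    // If only one of two cooperating compilers performs this optimization,
    // sizeof(Policy) differs between host and device translation units;
    // adding 'char dummy;' to the base makes it non-empty everywhere and
    // forces a consistent (if slightly larger) layout.
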
diff --git a/bundled/kokkos-3.7.00/core/src/traits/Kokkos_WorkItemPropertyTrait.hpp b/bundled/kokkos-3.7.00/core/src/traits/Kokkos_WorkItemPropertyTrait.hpp
new file mode 100644 (file)
index 0000000..edc488a
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_WORKITEMPROPERTYTRAIT_HPP
+#define KOKKOS_KOKKOS_WORKITEMPROPERTYTRAIT_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Concepts.hpp>  // WorkItemProperty
+#include <traits/Kokkos_PolicyTraitAdaptor.hpp>
+#include <traits/Kokkos_Traits_fwd.hpp>
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="trait specification"> {{{1
+
+struct WorkItemPropertyTrait : TraitSpecificationBase<WorkItemPropertyTrait> {
+  struct base_traits {
+    using work_item_property = Kokkos::Experimental::WorkItemProperty::None_t;
+    KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND
+  };
+  template <class WorkItemProp, class AnalyzeNextTrait>
+  struct mixin_matching_trait : AnalyzeNextTrait {
+    using base_t = AnalyzeNextTrait;
+    using base_t::base_t;
+    using work_item_property = WorkItemProp;
+  };
+  template <class T>
+  using trait_matches_specification =
+      Kokkos::Experimental::is_work_item_property<T>;
+};
+
+// </editor-fold> end trait specification }}}1
+//==============================================================================
+
+}  // end namespace Impl
+
+namespace Experimental {
+
+//==============================================================================
+// <editor-fold desc="User interface"> {{{1
+
+template <class Policy, unsigned long Property>
+constexpr auto require(const Policy p,
+                       WorkItemProperty::ImplWorkItemProperty<Property>) {
+  static_assert(Kokkos::is_execution_policy<Policy>::value, "");
+  using new_policy_t = Kokkos::Impl::WorkItemPropertyTrait::policy_with_trait<
+      Policy, WorkItemProperty::ImplWorkItemProperty<Property>>;
+  return new_policy_t{p};
+}
+
+// </editor-fold> end User interface }}}1
+//==============================================================================
+
+}  // namespace Experimental
+
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_WORKITEMPROPERTYTRAIT_HPP
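
For context, the require() overload defined in this new header is the user-facing
entry point for attaching a work-item property to an execution policy. A minimal
usage sketch (the kernel, view names, and extents are illustrative; HintLightWeight
is one of the properties provided by Kokkos::Experimental::WorkItemProperty):

  #include <Kokkos_Core.hpp>

  void axpy(double a, Kokkos::View<double*> x, Kokkos::View<double*> y) {
    // Wrap the range policy so its trait list records the light-weight hint.
    auto policy = Kokkos::Experimental::require(
        Kokkos::RangePolicy<>(0, x.extent(0)),
        Kokkos::Experimental::WorkItemProperty::HintLightWeight);
    Kokkos::parallel_for(
        "axpy", policy, KOKKOS_LAMBDA(int i) { y(i) += a * x(i); });
  }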
diff --git a/bundled/kokkos-3.7.00/core/src/traits/Kokkos_WorkTagTrait.hpp b/bundled/kokkos-3.7.00/core/src/traits/Kokkos_WorkTagTrait.hpp
new file mode 100644 (file)
index 0000000..f306e43
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_KOKKOS_WORKTAGTRAIT_HPP
+#define KOKKOS_KOKKOS_WORKTAGTRAIT_HPP
+
+#include <Kokkos_Macros.hpp>
+#include <Kokkos_Concepts.hpp>  // is_execution_space
+#include <traits/Kokkos_PolicyTraitAdaptor.hpp>
+#include <traits/Kokkos_Traits_fwd.hpp>
+#include <impl/Kokkos_Utilities.hpp>  // type_list_any, type_list_remove_first
+
+namespace Kokkos {
+namespace Impl {
+
+//==============================================================================
+// <editor-fold desc="trait specification"> {{{1
+
+template <class T>
+struct show_extra_work_tag_erroneously_given_to_execution_policy;
+template <>
+struct show_extra_work_tag_erroneously_given_to_execution_policy<void> {};
+
+using _exec_policy_traits_without_work_tag = typename type_list_remove_first<
+    WorkTagTrait, execution_policy_trait_specifications>::type;
+
+template <class Trait>
+struct _trait_matches_spec_predicate {
+  template <class TraitSpec>
+  struct apply {
+    using type = typename PolicyTraitMatcher<TraitSpec, Trait>::type;
+    static constexpr bool value = type::value;
+  };
+};
+
+struct WorkTagTrait : TraitSpecificationBase<WorkTagTrait> {
+  struct base_traits {
+    using work_tag = void;
+    KOKKOS_IMPL_MSVC_NVCC_EBO_WORKAROUND
+  };
+  template <class WorkTag, class AnalyzeNextTrait>
+  struct mixin_matching_trait : AnalyzeNextTrait {
+    using base_t = AnalyzeNextTrait;
+    using base_t::base_t;
+    using work_tag = WorkTag;
+    static constexpr auto show_work_tag_error_in_compilation_message =
+        show_extra_work_tag_erroneously_given_to_execution_policy<
+            typename base_t::work_tag>{};
+    static_assert(
+        std::is_void<typename base_t::work_tag>::value,
+        "Kokkos Error: More than one work tag given. Search compiler output "
+        "for 'show_extra_work_tag' to see the type of the errant tag.");
+  };
+  // Since we don't have subsumption in pre-C++20, the work-tag "trait"
+  // handling code has to ensure that none of the other trait conditions are
+  // met.
+  // * Compile-time cost note: at first glance this appears to "recheck" all
+  //   of the other trait specs when used in the context of the full list of
+  //   execution policy traits. In practice, since all of them were already
+  //   checked to reach the end of the list, the compiler has already
+  //   generated those definitions, so there should be little extra cost.
+  //   However, when the work tag is used in isolation (e.g., a `require()`-
+  //   like facility that changes the work tag of an existing execution
+  //   policy instance), all of the other traits do need to be checked to
+  //   make sure nothing else is being replaced, since the concept of a work
+  //   tag is essentially unconstrained and could be any type. This should
+  //   still be as efficient at compile time as the old code, which used a
+  //   long series of nested std::conditionals, but that assumption should be
+  //   benchmarked if it becomes a problem.
+  template <class T>
+  using trait_matches_specification = std::integral_constant<
+      bool,
+#ifndef KOKKOS_ENABLE_DEPRECATED_CODE_3
+      std::is_empty<T>::value &&
+#else
+      !std::is_void<T>::value &&
+#endif
+          !type_list_any<_trait_matches_spec_predicate<T>::template apply,
+                         _exec_policy_traits_without_work_tag>::value>;
+};
+
+// </editor-fold> end trait specification }}}1
+//==============================================================================
+
+}  // end namespace Impl
+}  // end namespace Kokkos
+
+#endif  // KOKKOS_KOKKOS_WORKTAGTRAIT_HPP
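
The trait defined above is what lets an execution policy carry a work tag that
dispatches to tagged operator() overloads of a functor; the static_assert in
mixin_matching_trait fires as soon as a second tag is supplied. A minimal sketch of
the user-facing behavior (tag, functor, and function names are illustrative):

  struct EvenPass {};
  struct OddPass {};

  struct Sweep {
    KOKKOS_FUNCTION void operator()(EvenPass, int i) const { /* even cells */ }
    KOKKOS_FUNCTION void operator()(OddPass, int i) const { /* odd cells */ }
  };

  void run_sweeps(int n) {
    Sweep f;
    // The tag in the policy's template arguments selects the overload.
    Kokkos::parallel_for(Kokkos::RangePolicy<EvenPass>(0, n), f);
    Kokkos::parallel_for(Kokkos::RangePolicy<OddPass>(0, n), f);
    // RangePolicy<EvenPass, OddPass> would trip the static_assert above.
  }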
diff --git a/bundled/kokkos-3.7.00/simd/cmake/Dependencies.cmake b/bundled/kokkos-3.7.00/simd/cmake/Dependencies.cmake
new file mode 100644 (file)
index 0000000..5e29157
--- /dev/null
@@ -0,0 +1,5 @@
+TRIBITS_PACKAGE_DEFINE_DEPENDENCIES(
+  LIB_REQUIRED_PACKAGES KokkosCore
+  LIB_OPTIONAL_TPLS Pthread CUDA HWLOC HPX
+  TEST_OPTIONAL_TPLS CUSPARSE
+  )
diff --git a/bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD.hpp b/bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD.hpp
new file mode 100644 (file)
index 0000000..a15a1b8
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SIMD_HPP
+#define KOKKOS_SIMD_HPP
+
+#include <Kokkos_SIMD_Common.hpp>
+
+#include <Kokkos_SIMD_Scalar.hpp>
+
+#ifdef KOKKOS_ARCH_AVX512XEON
+#include <Kokkos_SIMD_AVX512.hpp>
+#endif
+
+namespace Kokkos {
+namespace Experimental {
+
+namespace simd_abi {
+
+namespace Impl {
+
+#if defined(KOKKOS_ARCH_AVX512XEON)
+using host_native = avx512_fixed_size<8>;
+#else
+using host_native  = scalar;
+#endif
+
+template <class T>
+struct ForSpace;
+
+#ifdef KOKKOS_ENABLE_SERIAL
+template <>
+struct ForSpace<Kokkos::Serial> {
+  using type = host_native;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_CUDA
+template <>
+struct ForSpace<Kokkos::Cuda> {
+  using type = scalar;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_THREADS
+template <>
+struct ForSpace<Kokkos::Threads> {
+  using type = host_native;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_HPX
+template <>
+struct ForSpace<Kokkos::Experimental::HPX> {
+  using type = scalar;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_OPENMP
+template <>
+struct ForSpace<Kokkos::OpenMP> {
+  using type = host_native;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_OPENMPTARGET
+template <>
+struct ForSpace<Kokkos::Experimental::OpenMPTarget> {
+  using type = scalar;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_HIP
+template <>
+struct ForSpace<Kokkos::Experimental::HIP> {
+  using type = scalar;
+};
+#endif
+
+#ifdef KOKKOS_ENABLE_SYCL
+template <>
+struct ForSpace<Kokkos::Experimental::SYCL> {
+  using type = scalar;
+};
+#endif
+
+}  // namespace Impl
+
+template <class Space>
+using ForSpace = typename Impl::ForSpace<typename Space::execution_space>::type;
+
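+// Note: the element type T is intentionally unused here; in this version the
+// native ABI is chosen from the default execution space alone, so native<T>
+// is the same ABI for every T.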
+template <class T>
+using native = ForSpace<Kokkos::DefaultExecutionSpace>;
+
+}  // namespace simd_abi
+
+template <class T>
+using native_simd = simd<T, simd_abi::native<T>>;
+template <class T>
+using native_simd_mask = simd_mask<T, simd_abi::native<T>>;
+
+namespace Impl {
+
+template <class... Abis>
+class abi_set {};
+
+#ifdef KOKKOS_ARCH_AVX512XEON
+using host_abi_set = abi_set<simd_abi::scalar, simd_abi::avx512_fixed_size<8>>;
+#else
+using host_abi_set = abi_set<simd_abi::scalar>;
+#endif
+
+using device_abi_set = abi_set<simd_abi::scalar>;
+
+}  // namespace Impl
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
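
This header selects simd_abi::avx512_fixed_size<8> as the host-native ABI only when
KOKKOS_ARCH_AVX512XEON is defined, and falls back to the scalar ABI everywhere else,
so code written against native_simd compiles unchanged either way. A minimal
host-side sketch (function and variable names are illustrative):

  #include <Kokkos_SIMD.hpp>

  void scale(double* x, int n, double a) {
    using simd_t = Kokkos::Experimental::native_simd<double>;
    using tag_t  = Kokkos::Experimental::element_aligned_tag;
    int const w  = int(simd_t::size());  // 8 with AVX-512, 1 with the scalar ABI
    int i = 0;
    for (; i + w <= n; i += w) {
      simd_t v;
      v.copy_from(x + i, tag_t());  // element-aligned (unaligned-safe) load
      v = v * simd_t(a);
      v.copy_to(x + i, tag_t());
    }
    for (; i < n; ++i) x[i] *= a;  // scalar remainder
  }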
diff --git a/bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD_AVX512.hpp b/bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD_AVX512.hpp
new file mode 100644 (file)
index 0000000..1df0730
--- /dev/null
@@ -0,0 +1,1023 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SIMD_AVX512_HPP
+#define KOKKOS_SIMD_AVX512_HPP
+
+#include <functional>
+#include <type_traits>
+
+#include <Kokkos_SIMD_Common.hpp>
+
+#include <immintrin.h>
+
+namespace Kokkos {
+namespace Experimental {
+
+namespace simd_abi {
+
+template <int N>
+class avx512_fixed_size {};
+
+}  // namespace simd_abi
+
+template <class T>
+class simd_mask<T, simd_abi::avx512_fixed_size<8>> {
+  __mmask8 m_value;
+
+ public:
+  class reference {
+    __mmask8& m_mask;
+    int m_lane;
+    KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION __mmask8 bit_mask() const {
+      return __mmask8(std::int16_t(1 << m_lane));
+    }
+
+   public:
+    KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference(__mmask8& mask_arg,
+                                                    int lane_arg)
+        : m_mask(mask_arg), m_lane(lane_arg) {}
+    KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference
+    operator=(bool value) const {
+      if (value) {
+        m_mask |= bit_mask();
+      } else {
+        m_mask &= ~bit_mask();
+      }
+      return *this;
+    }
+    KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION operator bool() const {
+      return (m_mask & bit_mask()) != 0;
+    }
+  };
+  using value_type                                  = bool;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask() = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
+      : m_value(-std::int16_t(value)) {}
+  template <class U>
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask(
+      simd_mask<U, simd_abi::avx512_fixed_size<8>> const& other)
+      : m_value(static_cast<__mmask8>(other)) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+    return 8;
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd_mask(
+      __mmask8 const& value_in)
+      : m_value(value_in) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __mmask8()
+      const {
+    return m_value;
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+    return reference(m_value, int(i));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+  operator[](std::size_t i) const {
+    // reference's constructor takes a mutable mask, so cast away const for
+    // this read-only access; the reference is only converted to bool here.
+    return static_cast<value_type>(
+        reference(const_cast<__mmask8&>(m_value), int(i)));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+  operator||(simd_mask const& other) const {
+    return simd_mask(_kor_mask8(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask
+  operator&&(simd_mask const& other) const {
+    return simd_mask(_kand_mask8(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd_mask operator!() const {
+    static const __mmask8 true_value(static_cast<__mmask8>(simd_mask(true)));
+    return simd_mask(_kxor_mask8(true_value, m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator==(
+      simd_mask const& other) const {
+    return m_value == other.m_value;
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION bool operator!=(
+      simd_mask const& other) const {
+    return m_value != other.m_value;
+  }
+};
+
+template <>
+class simd<std::int32_t, simd_abi::avx512_fixed_size<8>> {
+  __m256i m_value;
+
+ public:
+  using value_type = std::int32_t;
+  using abi_type   = simd_abi::avx512_fixed_size<8>;
+  using mask_type  = simd_mask<value_type, abi_type>;
+  using reference  = value_type&;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd()            = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&)      = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+    return 8;
+  }
+  template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+                                      bool> = false>
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+      : m_value(_mm256_set1_epi32(value_type(value))) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+      __m256i const& value_in)
+      : m_value(value_in) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+      simd<std::uint64_t, abi_type> const& other);
+  template <class G,
+            std::enable_if_t<
+                // basically, can you do { value_type r =
+                // gen(std::integral_constant<std::size_t, i>()); }
+                std::is_invocable_r_v<value_type, G,
+                                      std::integral_constant<std::size_t, 0>>,
+                bool> = false>
+  KOKKOS_FORCEINLINE_FUNCTION simd(G&& gen)
+      : m_value(
+            _mm256_setr_epi32(gen(std::integral_constant<std::size_t, 0>()),
+                              gen(std::integral_constant<std::size_t, 1>()),
+                              gen(std::integral_constant<std::size_t, 2>()),
+                              gen(std::integral_constant<std::size_t, 3>()),
+                              gen(std::integral_constant<std::size_t, 4>()),
+                              gen(std::integral_constant<std::size_t, 5>()),
+                              gen(std::integral_constant<std::size_t, 6>()),
+                              gen(std::integral_constant<std::size_t, 7>()))) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+    return reinterpret_cast<value_type*>(&m_value)[i];
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+  operator[](std::size_t i) const {
+    return reinterpret_cast<value_type const*>(&m_value)[i];
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+      value_type* ptr, element_aligned_tag) const {
+    _mm256_mask_storeu_epi32(ptr, static_cast<__mmask8>(mask_type(true)),
+                             m_value);
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+                                                       element_aligned_tag) {
+    m_value = _mm256_mask_loadu_epi32(
+        _mm256_set1_epi32(0), static_cast<__mmask8>(mask_type(true)), ptr);
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256i()
+      const {
+    return m_value;
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator<(simd const& other) const {
+    return mask_type(_mm256_cmplt_epi32_mask(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator>(simd const& other) const {
+    return mask_type(_mm256_cmplt_epi32_mask(other.m_value, m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator<=(simd const& other) const {
+    return mask_type(_mm256_cmple_epi32_mask(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator>=(simd const& other) const {
+    return mask_type(_mm256_cmple_epi32_mask(other.m_value, m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator==(simd const& other) const {
+    return mask_type(_mm256_cmpeq_epi32_mask(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator!=(simd const& other) const {
+    return mask_type(_mm256_cmpneq_epi32_mask(m_value, other.m_value));
+  }
+};
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::int32_t, simd_abi::avx512_fixed_size<8>>
+    operator*(simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(
+      _mm256_mullo_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::int32_t, simd_abi::avx512_fixed_size<8>>
+    operator+(simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(
+      _mm256_add_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::int32_t, simd_abi::avx512_fixed_size<8>>
+    operator-(simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(
+      _mm256_sub_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::int32_t, simd_abi::avx512_fixed_size<8>>
+    operator-(simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& a) {
+  return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(0) - a;
+}
+
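+// condition(a, b, c) is a lane-wise a ? b : c: it selects b in lanes where
+// the mask is set and c elsewhere. Note the operand order of the underlying
+// mask_blend intrinsics, which take the "false" vector first.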
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<std::int32_t, simd_abi::avx512_fixed_size<8>> condition(
+    simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>> const& a,
+    simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& b,
+    simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& c) {
+  return simd<std::int32_t, simd_abi::avx512_fixed_size<8>>(
+      _mm256_mask_blend_epi32(static_cast<__mmask8>(a), static_cast<__m256i>(c),
+                              static_cast<__m256i>(b)));
+}
+
+template <>
+class simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> {
+  __m256i m_value;
+
+ public:
+  using value_type = std::uint32_t;
+  using abi_type   = simd_abi::avx512_fixed_size<8>;
+  using mask_type  = simd_mask<value_type, abi_type>;
+  using reference  = value_type&;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd()            = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&)      = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+    return 8;
+  }
+  template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+                                      bool> = false>
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+      : m_value(_mm256_set1_epi32(bit_cast<std::int32_t>(value_type(value)))) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+      __m256i const& value_in)
+      : m_value(value_in) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+      simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& other)
+      : m_value(static_cast<__m256i>(other)) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+    return reinterpret_cast<value_type*>(&m_value)[i];
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+  operator[](std::size_t i) const {
+    return reinterpret_cast<value_type const*>(&m_value)[i];
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m256i()
+      const {
+    return m_value;
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator<(simd const& other) const {
+    return mask_type(_mm256_cmplt_epu32_mask(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator>(simd const& other) const {
+    return mask_type(_mm256_cmplt_epu32_mask(other.m_value, m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator<=(simd const& other) const {
+    return mask_type(_mm256_cmple_epu32_mask(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator>=(simd const& other) const {
+    return mask_type(_mm256_cmple_epu32_mask(other.m_value, m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator==(simd const& other) const {
+    return mask_type(_mm256_cmpeq_epu32_mask(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator!=(simd const& other) const {
+    return mask_type(_mm256_cmpneq_epu32_mask(m_value, other.m_value));
+  }
+};
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>
+    operator*(simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>(
+      _mm256_mullo_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>
+    operator+(simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>(
+      _mm256_add_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>
+    operator-(simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>(
+      _mm256_sub_epi32(static_cast<__m256i>(lhs), static_cast<__m256i>(rhs)));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> condition(
+    simd_mask<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& a,
+    simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& b,
+    simd<std::uint32_t, simd_abi::avx512_fixed_size<8>> const& c) {
+  return simd<std::uint32_t, simd_abi::avx512_fixed_size<8>>(
+      _mm256_mask_blend_epi32(static_cast<__mmask8>(a), static_cast<__m256i>(c),
+                              static_cast<__m256i>(b)));
+}
+
+template <>
+class simd<std::int64_t, simd_abi::avx512_fixed_size<8>> {
+  __m512i m_value;
+
+ public:
+  using value_type = std::int64_t;
+  using abi_type   = simd_abi::avx512_fixed_size<8>;
+  using mask_type  = simd_mask<value_type, abi_type>;
+  using reference  = value_type&;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd()            = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&)      = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+    return 8;
+  }
+  template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+                                      bool> = false>
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+      : m_value(_mm512_set1_epi64(value_type(value))) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+      simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& other)
+      : m_value(_mm512_cvtepi32_epi64(static_cast<__m256i>(other))) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+      simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& other);
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr simd(__m512i const& value_in)
+      : m_value(value_in) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+    return reinterpret_cast<value_type*>(&m_value)[i];
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+  operator[](std::size_t i) const {
+    return reinterpret_cast<value_type const*>(&m_value)[i];
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+      value_type* ptr, element_aligned_tag) const {
+    _mm512_mask_storeu_epi64(ptr, static_cast<__mmask8>(mask_type(true)),
+                             m_value);
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd operator>>(int rhs) const {
+    return _mm512_srai_epi64(m_value, rhs);
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+  operator>>(simd<int, simd_abi::avx512_fixed_size<8>> const& rhs) const {
+    return _mm512_srav_epi64(m_value,
+                             _mm512_cvtepi32_epi64(static_cast<__m256i>(rhs)));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd operator<<(int rhs) const {
+    return _mm512_slli_epi64(m_value, rhs);
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+  operator<<(simd<int, simd_abi::avx512_fixed_size<8>> const& rhs) const {
+    return _mm512_sllv_epi64(m_value,
+                             _mm512_cvtepi32_epi64(static_cast<__m256i>(rhs)));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m512i()
+      const {
+    return m_value;
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator<(simd const& other) const {
+    return mask_type(_mm512_cmplt_epi64_mask(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator>(simd const& other) const {
+    return mask_type(_mm512_cmplt_epi64_mask(other.m_value, m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator<=(simd const& other) const {
+    return mask_type(_mm512_cmple_epi64_mask(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator>=(simd const& other) const {
+    return mask_type(_mm512_cmple_epi64_mask(other.m_value, m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator==(simd const& other) const {
+    return mask_type(_mm512_cmpeq_epi64_mask(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator!=(simd const& other) const {
+    return mask_type(_mm512_cmpneq_epi64_mask(m_value, other.m_value));
+  }
+};
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::int64_t, simd_abi::avx512_fixed_size<8>>
+    operator*(simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<std::int64_t, simd_abi::avx512_fixed_size<8>>(
+      _mm512_mullo_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::int64_t, simd_abi::avx512_fixed_size<8>>
+    operator+(simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<std::int64_t, simd_abi::avx512_fixed_size<8>>(
+      _mm512_add_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::int64_t, simd_abi::avx512_fixed_size<8>>
+    operator-(simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<std::int64_t, simd_abi::avx512_fixed_size<8>>(
+      _mm512_sub_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::int64_t, simd_abi::avx512_fixed_size<8>>
+    operator-(simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& a) {
+  return simd<std::int64_t, simd_abi::avx512_fixed_size<8>>(0) - a;
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<std::int64_t, simd_abi::avx512_fixed_size<8>> condition(
+    simd_mask<std::int64_t, simd_abi::avx512_fixed_size<8>> const& a,
+    simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& b,
+    simd<std::int64_t, simd_abi::avx512_fixed_size<8>> const& c) {
+  return simd<std::int64_t, simd_abi::avx512_fixed_size<8>>(
+      _mm512_mask_blend_epi64(static_cast<__mmask8>(a), static_cast<__m512i>(c),
+                              static_cast<__m512i>(b)));
+}
+
+template <>
+class simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> {
+  __m512i m_value;
+
+ public:
+  using value_type = std::uint64_t;
+  using abi_type   = simd_abi::avx512_fixed_size<8>;
+  using mask_type  = simd_mask<value_type, abi_type>;
+  using reference  = value_type&;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd()            = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&)      = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+    return 8;
+  }
+  template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+                                      bool> = false>
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+      : m_value(_mm512_set1_epi64(bit_cast<std::int64_t>(value_type(value)))) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr simd(__m512i const& value_in)
+      : m_value(value_in) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+      simd<std::int32_t, abi_type> const& other)
+      : m_value(_mm512_cvtepi32_epi64(static_cast<__m256i>(other))) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION explicit simd(
+      simd<std::int64_t, abi_type> const& other)
+      : m_value(static_cast<__m512i>(other)) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+    return reinterpret_cast<value_type*>(&m_value)[i];
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+  operator[](std::size_t i) const {
+    return reinterpret_cast<value_type const*>(&m_value)[i];
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+  operator>>(unsigned int rhs) const {
+    return _mm512_srli_epi64(m_value, rhs);
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd operator>>(
+      simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& rhs) const {
+    return _mm512_srlv_epi64(m_value,
+                             _mm512_cvtepi32_epi64(static_cast<__m256i>(rhs)));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+  operator<<(unsigned int rhs) const {
+    return _mm512_slli_epi64(m_value, rhs);
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd operator<<(
+      simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& rhs) const {
+    return _mm512_sllv_epi64(m_value,
+                             _mm512_cvtepi32_epi64(static_cast<__m256i>(rhs)));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+  operator&(simd const& other) const {
+    return _mm512_and_epi64(m_value, other.m_value);
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd
+  operator|(simd const& other) const {
+    return _mm512_or_epi64(m_value, other.m_value);
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m512i()
+      const {
+    return m_value;
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator<(simd const& other) const {
+    return mask_type(_mm512_cmplt_epu64_mask(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator>(simd const& other) const {
+    return mask_type(_mm512_cmplt_epu64_mask(other.m_value, m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator<=(simd const& other) const {
+    return mask_type(_mm512_cmple_epu64_mask(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator>=(simd const& other) const {
+    return mask_type(_mm512_cmple_epu64_mask(other.m_value, m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator==(simd const& other) const {
+    return mask_type(_mm512_cmpeq_epu64_mask(m_value, other.m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator!=(simd const& other) const {
+    return mask_type(_mm512_cmpneq_epu64_mask(m_value, other.m_value));
+  }
+};
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>
+    operator*(simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>(
+      _mm512_mullo_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>
+    operator+(simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>(
+      _mm512_add_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>
+    operator-(simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>(
+      _mm512_sub_epi64(static_cast<__m512i>(lhs), static_cast<__m512i>(rhs)));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> condition(
+    simd_mask<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& a,
+    simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& b,
+    simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& c) {
+  return simd<std::uint64_t, simd_abi::avx512_fixed_size<8>>(
+      _mm512_mask_blend_epi64(static_cast<__mmask8>(a), static_cast<__m512i>(c),
+                              static_cast<__m512i>(b)));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<std::int32_t, simd_abi::avx512_fixed_size<8>>::simd(
+    simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& other)
+    : m_value(_mm512_cvtepi64_epi32(static_cast<__m512i>(other))) {}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<std::int64_t, simd_abi::avx512_fixed_size<8>>::simd(
+    simd<std::uint64_t, simd_abi::avx512_fixed_size<8>> const& other)
+    : m_value(static_cast<__m512i>(other)) {}
+
+template <>
+class simd<double, simd_abi::avx512_fixed_size<8>> {
+  __m512d m_value;
+
+ public:
+  using value_type = double;
+  using abi_type   = simd_abi::avx512_fixed_size<8>;
+  using mask_type  = simd_mask<value_type, abi_type>;
+  using reference  = value_type&;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd()            = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd const&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(simd&&)      = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd const&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd& operator=(simd&&) = default;
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION static constexpr std::size_t size() {
+    return 8;
+  }
+  template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+                                      bool> = false>
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(U&& value)
+      : m_value(_mm512_set1_pd(value_type(value))) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION simd(double a, double b, double c,
+                                             double d, double e, double f,
+                                             double g, double h)
+      : m_value(_mm512_setr_pd(a, b, c, d, e, f, g, h)) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit simd(
+      __m512d const& value_in)
+      : m_value(value_in) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION reference operator[](std::size_t i) {
+    return reinterpret_cast<value_type*>(&m_value)[i];
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION value_type
+  operator[](std::size_t i) const {
+    return reinterpret_cast<value_type const*>(&m_value)[i];
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_from(value_type const* ptr,
+                                                       element_aligned_tag) {
+    m_value = _mm512_loadu_pd(ptr);
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void copy_to(
+      value_type* ptr, element_aligned_tag) const {
+    _mm512_storeu_pd(ptr, m_value);
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr explicit operator __m512d()
+      const {
+    return m_value;
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator<(simd const& other) const {
+    return mask_type(_mm512_cmp_pd_mask(m_value, other.m_value, _CMP_LT_OS));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator>(simd const& other) const {
+    return mask_type(_mm512_cmp_pd_mask(m_value, other.m_value, _CMP_GT_OS));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator<=(simd const& other) const {
+    return mask_type(_mm512_cmp_pd_mask(m_value, other.m_value, _CMP_LE_OS));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator>=(simd const& other) const {
+    return mask_type(_mm512_cmp_pd_mask(m_value, other.m_value, _CMP_GE_OS));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator==(simd const& other) const {
+    return mask_type(_mm512_cmp_pd_mask(m_value, other.m_value, _CMP_EQ_OS));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION mask_type
+  operator!=(simd const& other) const {
+    return mask_type(_mm512_cmp_pd_mask(m_value, other.m_value, _CMP_NEQ_OS));
+  }
+};
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<double, simd_abi::avx512_fixed_size<8>>
+    operator*(simd<double, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<double, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_mul_pd(static_cast<__m512d>(lhs), static_cast<__m512d>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<double, simd_abi::avx512_fixed_size<8>>
+    operator/(simd<double, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<double, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_div_pd(static_cast<__m512d>(lhs), static_cast<__m512d>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<double, simd_abi::avx512_fixed_size<8>>
+    operator+(simd<double, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<double, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_add_pd(static_cast<__m512d>(lhs), static_cast<__m512d>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<double, simd_abi::avx512_fixed_size<8>>
+    operator-(simd<double, simd_abi::avx512_fixed_size<8>> const& lhs,
+              simd<double, simd_abi::avx512_fixed_size<8>> const& rhs) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_sub_pd(static_cast<__m512d>(lhs), static_cast<__m512d>(rhs)));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+    simd<double, simd_abi::avx512_fixed_size<8>>
+    operator-(simd<double, simd_abi::avx512_fixed_size<8>> const& a) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_sub_pd(_mm512_set1_pd(0.0), static_cast<__m512d>(a)));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<double, simd_abi::avx512_fixed_size<8>> copysign(
+    simd<double, simd_abi::avx512_fixed_size<8>> const& a,
+    simd<double, simd_abi::avx512_fixed_size<8>> const& b) {
+  static const __m512i sign_mask = reinterpret_cast<__m512i>(
+      static_cast<__m512d>(simd<double, simd_abi::avx512_fixed_size<8>>(-0.0)));
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      reinterpret_cast<__m512d>(_mm512_xor_epi64(
+          _mm512_andnot_epi64(
+              sign_mask, reinterpret_cast<__m512i>(static_cast<__m512d>(a))),
+          _mm512_and_epi64(
+              sign_mask, reinterpret_cast<__m512i>(static_cast<__m512d>(b))))));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<double, simd_abi::avx512_fixed_size<8>> abs(
+    simd<double, simd_abi::avx512_fixed_size<8>> const& a) {
+  __m512d const rhs = static_cast<__m512d>(a);
+  return simd<double, simd_abi::avx512_fixed_size<8>>(reinterpret_cast<__m512d>(
+      _mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),
+                       reinterpret_cast<__m512i>(rhs))));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<double, simd_abi::avx512_fixed_size<8>> sqrt(
+    simd<double, simd_abi::avx512_fixed_size<8>> const& a) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_sqrt_pd(static_cast<__m512d>(a)));
+}
+
+#ifdef __INTEL_COMPILER
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<double, simd_abi::avx512_fixed_size<8>> cbrt(
+    simd<double, simd_abi::avx512_fixed_size<8>> const& a) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_cbrt_pd(static_cast<__m512d>(a)));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<double, simd_abi::avx512_fixed_size<8>> exp(
+    simd<double, simd_abi::avx512_fixed_size<8>> const& a) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_exp_pd(static_cast<__m512d>(a)));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<double, simd_abi::avx512_fixed_size<8>> log(
+    simd<double, simd_abi::avx512_fixed_size<8>> const& a) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_log_pd(static_cast<__m512d>(a)));
+}
+
+#endif
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<double, simd_abi::avx512_fixed_size<8>> fma(
+    simd<double, simd_abi::avx512_fixed_size<8>> const& a,
+    simd<double, simd_abi::avx512_fixed_size<8>> const& b,
+    simd<double, simd_abi::avx512_fixed_size<8>> const& c) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_fmadd_pd(static_cast<__m512d>(a), static_cast<__m512d>(b),
+                      static_cast<__m512d>(c)));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<double, simd_abi::avx512_fixed_size<8>> max(
+    simd<double, simd_abi::avx512_fixed_size<8>> const& a,
+    simd<double, simd_abi::avx512_fixed_size<8>> const& b) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_max_pd(static_cast<__m512d>(a), static_cast<__m512d>(b)));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<double, simd_abi::avx512_fixed_size<8>> min(
+    simd<double, simd_abi::avx512_fixed_size<8>> const& a,
+    simd<double, simd_abi::avx512_fixed_size<8>> const& b) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_min_pd(static_cast<__m512d>(a), static_cast<__m512d>(b)));
+}
+
+KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+simd<double, simd_abi::avx512_fixed_size<8>> condition(
+    simd_mask<double, simd_abi::avx512_fixed_size<8>> const& a,
+    simd<double, simd_abi::avx512_fixed_size<8>> const& b,
+    simd<double, simd_abi::avx512_fixed_size<8>> const& c) {
+  return simd<double, simd_abi::avx512_fixed_size<8>>(
+      _mm512_mask_blend_pd(static_cast<__mmask8>(a), static_cast<__m512d>(c),
+                           static_cast<__m512d>(b)));
+}
+
+template <>
+class const_where_expression<simd_mask<double, simd_abi::avx512_fixed_size<8>>,
+                             simd<double, simd_abi::avx512_fixed_size<8>>> {
+ public:
+  using abi_type   = simd_abi::avx512_fixed_size<8>;
+  using value_type = simd<double, abi_type>;
+  using mask_type  = simd_mask<double, abi_type>;
+
+ protected:
+  value_type& m_value;
+  mask_type const& m_mask;
+
+ public:
+  const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+      : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+  [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr mask_type const&
+  mask() const {
+    return m_mask;
+  }
+  [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr value_type const&
+  value() const {
+    return m_value;
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+  void copy_to(double* mem, element_aligned_tag) const {
+    _mm512_mask_storeu_pd(mem, static_cast<__mmask8>(m_mask),
+                          static_cast<__m512d>(m_value));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+  void scatter_to(
+      double* mem,
+      simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) const {
+    _mm512_mask_i32scatter_pd(mem, static_cast<__mmask8>(m_mask),
+                              static_cast<__m256i>(index),
+                              static_cast<__m512d>(m_value), 8);
+  }
+};
+
+template <>
+class where_expression<simd_mask<double, simd_abi::avx512_fixed_size<8>>,
+                       simd<double, simd_abi::avx512_fixed_size<8>>>
+    : public const_where_expression<
+          simd_mask<double, simd_abi::avx512_fixed_size<8>>,
+          simd<double, simd_abi::avx512_fixed_size<8>>> {
+ public:
+  where_expression(
+      simd_mask<double, simd_abi::avx512_fixed_size<8>> const& mask_arg,
+      simd<double, simd_abi::avx512_fixed_size<8>>& value_arg)
+      : const_where_expression(mask_arg, value_arg) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+  void copy_from(double const* mem, element_aligned_tag) {
+    m_value = value_type(_mm512_mask_loadu_pd(
+        _mm512_set1_pd(0.0), static_cast<__mmask8>(m_mask), mem));
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+  void gather_from(
+      double const* mem,
+      simd<std::int32_t, simd_abi::avx512_fixed_size<8>> const& index) {
+    m_value = value_type(_mm512_mask_i32gather_pd(
+        _mm512_set1_pd(0.0), static_cast<__mmask8>(m_mask),
+        static_cast<__m256i>(index), mem, 8));
+  }
+  template <class U, std::enable_if_t<
+                         std::is_convertible_v<
+                             U, simd<double, simd_abi::avx512_fixed_size<8>>>,
+                         bool> = false>
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION void operator=(U&& x) {
+    auto const x_as_value_type =
+        static_cast<simd<double, simd_abi::avx512_fixed_size<8>>>(
+            std::forward<U>(x));
+    m_value = simd<double, simd_abi::avx512_fixed_size<8>>(_mm512_mask_blend_pd(
+        static_cast<__mmask8>(m_mask), static_cast<__m512d>(m_value),
+        static_cast<__m512d>(x_as_value_type)));
+  }
+};
+
+template <>
+class const_where_expression<
+    simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>>,
+    simd<std::int32_t, simd_abi::avx512_fixed_size<8>>> {
+ public:
+  using abi_type   = simd_abi::avx512_fixed_size<8>;
+  using value_type = simd<std::int32_t, abi_type>;
+  using mask_type  = simd_mask<std::int32_t, abi_type>;
+
+ protected:
+  value_type& m_value;
+  mask_type const& m_mask;
+
+ public:
+  const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+      : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+  [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr mask_type const&
+  mask() const {
+    return m_mask;
+  }
+  [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr value_type const&
+  value() const {
+    return m_value;
+  }
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+  void copy_to(std::int32_t* mem, element_aligned_tag) const {
+    _mm256_mask_storeu_epi32(mem, static_cast<__mmask8>(m_mask),
+                             static_cast<__m256i>(m_value));
+  }
+};
+
+template <>
+class where_expression<simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>>,
+                       simd<std::int32_t, simd_abi::avx512_fixed_size<8>>>
+    : public const_where_expression<
+          simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>>,
+          simd<std::int32_t, simd_abi::avx512_fixed_size<8>>> {
+ public:
+  where_expression(
+      simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>> const& mask_arg,
+      simd<std::int32_t, simd_abi::avx512_fixed_size<8>>& value_arg)
+      : const_where_expression(mask_arg, value_arg) {}
+  KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION
+  void copy_from(std::int32_t const* mem, element_aligned_tag) {
+    m_value = value_type(_mm256_mask_loadu_epi32(
+        _mm256_set1_epi32(0), static_cast<__mmask8>(m_mask), mem));
+  }
+};
+
+template <>
+class const_where_expression<
+    simd_mask<std::int64_t, simd_abi::avx512_fixed_size<8>>,
+    simd<std::int64_t, simd_abi::avx512_fixed_size<8>>> {
+ public:
+  using abi_type   = simd_abi::avx512_fixed_size<8>;
+  using value_type = simd<std::int64_t, abi_type>;
+  using mask_type  = simd_mask<std::int64_t, abi_type>;
+
+ protected:
+  value_type& m_value;
+  mask_type const& m_mask;
+
+ public:
+  const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+      : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+  [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr mask_type const&
+  mask() const {
+    return m_mask;
+  }
+  [[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION constexpr value_type const&
+  value() const {
+    return m_value;
+  }
+};
+
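+// Masked horizontal reductions. In hmax below, the 256-bit vector is widened
+// to 512 bits; the undefined upper lanes are harmless because the 8-bit mask
+// can only select lanes from the lower half.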
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION std::int32_t hmax(
+    const_where_expression<
+        simd_mask<std::int32_t, simd_abi::avx512_fixed_size<8>>,
+        simd<std::int32_t, simd_abi::avx512_fixed_size<8>>> const& x) {
+  return _mm512_mask_reduce_max_epi32(
+      static_cast<__mmask8>(x.mask()),
+      _mm512_castsi256_si512(static_cast<__m256i>(x.value())));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION double hmin(
+    const_where_expression<simd_mask<double, simd_abi::avx512_fixed_size<8>>,
+                           simd<double, simd_abi::avx512_fixed_size<8>>> const&
+        x) {
+  return _mm512_mask_reduce_min_pd(static_cast<__mmask8>(x.mask()),
+                                   static_cast<__m512d>(x.value()));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION std::int64_t reduce(
+    const_where_expression<
+        simd_mask<std::int64_t, simd_abi::avx512_fixed_size<8>>,
+        simd<std::int64_t, simd_abi::avx512_fixed_size<8>>> const& x,
+    std::int64_t, std::plus<>) {
+  return _mm512_mask_reduce_add_epi64(static_cast<__mmask8>(x.mask()),
+                                      static_cast<__m512i>(x.value()));
+}
+
+[[nodiscard]] KOKKOS_IMPL_HOST_FORCEINLINE_FUNCTION double reduce(
+    const_where_expression<simd_mask<double, simd_abi::avx512_fixed_size<8>>,
+                           simd<double, simd_abi::avx512_fixed_size<8>>> const&
+        x,
+    double, std::plus<>) {
+  return _mm512_mask_reduce_add_pd(static_cast<__mmask8>(x.mask()),
+                                   static_cast<__m512d>(x.value()));
+}
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
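
The where-expression specializations and masked reductions at the end of this file
compose as follows; a minimal sketch for the AVX-512 ABI (function and variable
names are illustrative, and where() itself is declared in Kokkos_SIMD_Common.hpp):

  double sum_of_valid(double const* mem, int valid_lanes) {
    using namespace Kokkos::Experimental;
    using abi_t = simd_abi::avx512_fixed_size<8>;

    simd_mask<double, abi_t> m(false);
    for (int i = 0; i < valid_lanes; ++i) m[i] = true;  // enable leading lanes

    simd<double, abi_t> v(0.0);
    // Masked load: lanes outside the mask are filled with 0.0.
    where(m, v).copy_from(mem, element_aligned_tag());
    // Masked horizontal add over the enabled lanes only.
    return reduce(where(m, v), 0.0, std::plus<>());
  }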
diff --git a/bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD_Common.hpp b/bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD_Common.hpp
new file mode 100644 (file)
index 0000000..ae2843b
--- /dev/null
@@ -0,0 +1,428 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SIMD_COMMON_HPP
+#define KOKKOS_SIMD_COMMON_HPP
+
+#include <cmath>
+#include <cstring>
+
+#include <Kokkos_Core.hpp>
+
+namespace Kokkos {
+
+namespace Experimental {
+
+template <class To, class From>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION constexpr To bit_cast(
+    From const& src) {
+  To dst;
+  std::memcpy(&dst, &src, sizeof(To));
+  return dst;
+}
+
+template <class T, class Abi>
+class simd;
+
+template <class T, class Abi>
+class simd_mask;
+
+struct element_aligned_tag {};
+
+// class template declarations for const_where_expression and where_expression
+
+template <class M, class T>
+class const_where_expression {
+ protected:
+  T& m_value;
+  M const& m_mask;
+
+ public:
+  const_where_expression(M const& mask_arg, T const& value_arg)
+      : m_value(const_cast<T&>(value_arg)), m_mask(mask_arg) {}
+  KOKKOS_FORCEINLINE_FUNCTION T const& value() const { return this->m_value; }
+};
+
+template <class M, class T>
+class where_expression : public const_where_expression<M, T> {
+  using base_type = const_where_expression<M, T>;
+
+ public:
+  where_expression(M const& mask_arg, T& value_arg)
+      : base_type(mask_arg, value_arg) {}
+  KOKKOS_FORCEINLINE_FUNCTION T& value() { return this->m_value; }
+};
+
+// specializations of where expression templates for the case when the
+// mask type is bool, to allow generic code to use where() on both
+// SIMD types and non-SIMD builtin arithmetic types
+
+template <class T>
+class const_where_expression<bool, T> {
+ protected:
+  T& m_value;
+  bool m_mask;
+
+ public:
+  KOKKOS_FORCEINLINE_FUNCTION
+  const_where_expression(bool mask_arg, T const& value_arg)
+      : m_value(const_cast<T&>(value_arg)), m_mask(mask_arg) {}
+  KOKKOS_FORCEINLINE_FUNCTION T const& value() const { return this->m_value; }
+};
+
+template <class T>
+class where_expression<bool, T> : public const_where_expression<bool, T> {
+  using base_type = const_where_expression<bool, T>;
+
+ public:
+  KOKKOS_FORCEINLINE_FUNCTION
+  where_expression(bool mask_arg, T& value_arg)
+      : base_type(mask_arg, value_arg) {}
+  KOKKOS_FORCEINLINE_FUNCTION T& value() { return this->m_value; }
+  template <class U,
+            std::enable_if_t<std::is_convertible_v<U, T>, bool> = false>
+  KOKKOS_FORCEINLINE_FUNCTION void operator=(U const& x) {
+    if (this->m_mask) this->m_value = x;
+  }
+};
+
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION
+    where_expression<simd_mask<T, Abi>, simd<T, Abi>>
+    where(typename simd<T, Abi>::mask_type const& mask, simd<T, Abi>& value) {
+  return where_expression(mask, value);
+}
+
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION
+    const_where_expression<simd_mask<T, Abi>, simd<T, Abi>>
+    where(typename simd<T, Abi>::mask_type const& mask,
+          simd<T, Abi> const& value) {
+  return const_where_expression(mask, value);
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION where_expression<bool, T> where(
+    bool mask, T& value) {
+  return where_expression(mask, value);
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION const_where_expression<bool, T> where(
+    bool mask, T const& value) {
+  return const_where_expression(mask, value);
+}
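+
+// Editor's illustration (not part of the bundled sources): a minimal sketch of
+// why the bool overloads above exist. The same generic kernel compiles for a
+// plain (bool, double) pair and for a (simd_mask, simd) pair on the ABIs
+// shipped here, since both provide where_expression::operator=.
+//
+//   template <class M, class V>
+//   KOKKOS_FORCEINLINE_FUNCTION void clamp_to_zero(M const& m, V& x) {
+//     where(m, x) = V(0);
+//   }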
+
+// The code below provides:
+// operator@(simd<T, Abi>, Arithmetic)
+// operator@(Arithmetic, simd<T, Abi>)
+// operator@=(simd<T, Abi>&, U&&)
+// operator@=(where_expression<M, T>&, U&&)
+
+template <class T, class U, class Abi,
+          std::enable_if_t<std::is_arithmetic_v<U>, bool> = false>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto operator+(
+    Experimental::simd<T, Abi> const& lhs, U rhs) {
+  using result_member = decltype(lhs[0] + rhs);
+  return Experimental::simd<result_member, Abi>(lhs) +
+         Experimental::simd<result_member, Abi>(rhs);
+}
+
+template <class T, class U, class Abi,
+          std::enable_if_t<std::is_arithmetic_v<U>, bool> = false>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto operator+(
+    U lhs, Experimental::simd<T, Abi> const& rhs) {
+  using result_member = decltype(lhs + rhs[0]);
+  return Experimental::simd<result_member, Abi>(lhs) +
+         Experimental::simd<result_member, Abi>(rhs);
+}
+
+template <class T, class U, class Abi>
+KOKKOS_FORCEINLINE_FUNCTION simd<T, Abi>& operator+=(simd<T, Abi>& lhs,
+                                                     U&& rhs) {
+  lhs = lhs + std::forward<U>(rhs);
+  return lhs;
+}
+
+template <class M, class T, class U>
+KOKKOS_FORCEINLINE_FUNCTION where_expression<M, T>& operator+=(
+    where_expression<M, T>& lhs, U&& rhs) {
+  lhs = lhs.value() + std::forward<U>(rhs);
+  return lhs;
+}
+
+template <class T, class U, class Abi,
+          std::enable_if_t<std::is_arithmetic_v<U>, bool> = false>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto operator-(
+    Experimental::simd<T, Abi> const& lhs, U rhs) {
+  using result_member = decltype(lhs[0] - rhs);
+  return Experimental::simd<result_member, Abi>(lhs) -
+         Experimental::simd<result_member, Abi>(rhs);
+}
+
+template <class T, class U, class Abi,
+          std::enable_if_t<std::is_arithmetic_v<U>, bool> = false>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto operator-(
+    U lhs, Experimental::simd<T, Abi> const& rhs) {
+  using result_member = decltype(lhs - rhs[0]);
+  return Experimental::simd<result_member, Abi>(lhs) -
+         Experimental::simd<result_member, Abi>(rhs);
+}
+
+template <class T, class U, class Abi>
+KOKKOS_FORCEINLINE_FUNCTION simd<T, Abi>& operator-=(simd<T, Abi>& lhs,
+                                                     U&& rhs) {
+  lhs = lhs - std::forward<U>(rhs);
+  return lhs;
+}
+
+template <class M, class T, class U>
+KOKKOS_FORCEINLINE_FUNCTION where_expression<M, T>& operator-=(
+    where_expression<M, T>& lhs, U&& rhs) {
+  lhs = lhs.value() - std::forward<U>(rhs);
+  return lhs;
+}
+
+template <class T, class U, class Abi,
+          std::enable_if_t<std::is_arithmetic_v<U>, bool> = false>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto operator*(
+    Experimental::simd<T, Abi> const& lhs, U rhs) {
+  using result_member = decltype(lhs[0] * rhs);
+  return Experimental::simd<result_member, Abi>(lhs) *
+         Experimental::simd<result_member, Abi>(rhs);
+}
+
+template <class T, class U, class Abi,
+          std::enable_if_t<std::is_arithmetic_v<U>, bool> = false>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto operator*(
+    U lhs, Experimental::simd<T, Abi> const& rhs) {
+  using result_member = decltype(lhs * rhs[0]);
+  return Experimental::simd<result_member, Abi>(lhs) *
+         Experimental::simd<result_member, Abi>(rhs);
+}
+
+template <class T, class U, class Abi>
+KOKKOS_FORCEINLINE_FUNCTION simd<T, Abi>& operator*=(simd<T, Abi>& lhs,
+                                                     U&& rhs) {
+  lhs = lhs * std::forward<U>(rhs);
+  return lhs;
+}
+
+template <class M, class T, class U>
+KOKKOS_FORCEINLINE_FUNCTION where_expression<M, T>& operator*=(
+    where_expression<M, T>& lhs, U&& rhs) {
+  lhs = lhs.value() * std::forward<U>(rhs);
+  return lhs;
+}
+
+template <class T, class U, class Abi,
+          std::enable_if_t<std::is_arithmetic_v<U>, bool> = false>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto operator/(
+    Experimental::simd<T, Abi> const& lhs, U rhs) {
+  using result_member = decltype(lhs[0] / rhs);
+  return Experimental::simd<result_member, Abi>(lhs) /
+         Experimental::simd<result_member, Abi>(rhs);
+}
+
+template <class T, class U, class Abi,
+          std::enable_if_t<std::is_arithmetic_v<U>, bool> = false>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION auto operator/(
+    U lhs, Experimental::simd<T, Abi> const& rhs) {
+  using result_member = decltype(lhs / rhs[0]);
+  return Experimental::simd<result_member, Abi>(lhs) /
+         Experimental::simd<result_member, Abi>(rhs);
+}
+
+template <class T, class U, class Abi>
+KOKKOS_FORCEINLINE_FUNCTION simd<T, Abi>& operator/=(simd<T, Abi>& lhs,
+                                                     U&& rhs) {
+  lhs = lhs / std::forward<U>(rhs);
+  return lhs;
+}
+
+template <class M, class T, class U>
+KOKKOS_FORCEINLINE_FUNCTION where_expression<M, T>& operator/=(
+    where_expression<M, T>& lhs, U&& rhs) {
+  lhs = lhs.value() / std::forward<U>(rhs);
+  return lhs;
+}
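+
+// Editor's illustration (not part of the bundled sources): a sketch of the
+// mixed simd/arithmetic operators above, for any Abi defined in this module.
+// The scalar operand is broadcast to a simd of the common element type.
+//
+//   Kokkos::Experimental::simd<double, Abi> v(2.0);
+//   auto w = v + 1;   // int operand: decltype(v[0] + 1) is double
+//   v *= 2.0;         // compound form forwards to operator*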
+
+// implement mask reductions for type bool to allow generic code to accept
+// both simd<double, Abi> and just double
+
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION constexpr bool all_of(bool a) {
+  return a;
+}
+
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION constexpr bool any_of(bool a) {
+  return a;
+}
+
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION constexpr bool none_of(bool a) {
+  return !a;
+}
+
+// fallback implementations of reductions across simd_mask:
+
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION bool all_of(
+    simd_mask<T, Abi> const& a) {
+  return a == simd_mask<T, Abi>(true);
+}
+
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION bool any_of(
+    simd_mask<T, Abi> const& a) {
+  return a != simd_mask<T, Abi>(false);
+}
+
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION bool none_of(
+    simd_mask<T, Abi> const& a) {
+  return a == simd_mask<T, Abi>(false);
+}
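+
+// Editor's illustration (not part of the bundled sources): a sketch showing
+// that the overload sets above let generic code reduce both kinds of mask.
+//
+//   bool b = true;
+//   Kokkos::Experimental::simd_mask<double, Abi> m(true);
+//   all_of(b);   // constexpr bool overload
+//   all_of(m);   // fallback: compares against simd_mask<double, Abi>(true)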
+
+}  // namespace Experimental
+
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<T, Abi> min(
+    Experimental::simd<T, Abi> const& a, Experimental::simd<T, Abi> const& b) {
+  Experimental::simd<T, Abi> result;
+  for (std::size_t i = 0; i < Experimental::simd<T, Abi>::size(); ++i) {
+    result[i] = Kokkos::min(a[i], b[i]);
+  }
+  return result;
+}
+
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<T, Abi> max(
+    Experimental::simd<T, Abi> const& a, Experimental::simd<T, Abi> const& b) {
+  Experimental::simd<T, Abi> result;
+  for (std::size_t i = 0; i < Experimental::simd<T, Abi>::size(); ++i) {
+    result[i] = Kokkos::max(a[i], b[i]);
+  }
+  return result;
+}
+
+// Fallback implementations of <cmath> functions.
+// Individual Abi types may provide overloads with more efficient
+// implementations.
+// These live directly in namespace Kokkos rather than in Experimental,
+// because the corresponding double overloads are not in Experimental
+// either.
+
+#define KOKKOS_IMPL_SIMD_UNARY_FUNCTION(FUNC)                               \
+  template <class Abi>                                                      \
+  [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<double, Abi> \
+  FUNC(Experimental::simd<double, Abi> const& a) {                          \
+    Experimental::simd<double, Abi> result;                                 \
+    for (std::size_t i = 0; i < Experimental::simd<double, Abi>::size();    \
+         ++i) {                                                             \
+      result[i] = Kokkos::FUNC(a[i]);                                       \
+    }                                                                       \
+    return result;                                                          \
+  }
+
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(abs)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(exp)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(exp2)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(log)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(log10)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(log2)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(sqrt)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(cbrt)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(sin)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(cos)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(tan)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(asin)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(acos)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(atan)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(sinh)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(cosh)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(tanh)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(asinh)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(acosh)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(atanh)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(erf)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(erfc)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(tgamma)
+KOKKOS_IMPL_SIMD_UNARY_FUNCTION(lgamma)
+
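+// Editor's illustration (not part of the bundled sources): each expansion of
+// the macro above is an element-wise wrapper over the corresponding Kokkos
+// math function, e.g. (sketch):
+//
+//   Kokkos::Experimental::simd<double, Abi> x(4.0);
+//   auto r = Kokkos::sqrt(x);  // every lane holds 2.0
+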
+#define KOKKOS_IMPL_SIMD_BINARY_FUNCTION(FUNC)                              \
+  template <class Abi>                                                      \
+  [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<double, Abi> \
+  FUNC(Experimental::simd<double, Abi> const& a,                            \
+       Experimental::simd<double, Abi> const& b) {                          \
+    Experimental::simd<double, Abi> result;                                 \
+    for (std::size_t i = 0; i < Experimental::simd<double, Abi>::size();    \
+         ++i) {                                                             \
+      result[i] = Kokkos::FUNC(a[i], b[i]);                                 \
+    }                                                                       \
+    return result;                                                          \
+  }
+
+KOKKOS_IMPL_SIMD_BINARY_FUNCTION(pow)
+KOKKOS_IMPL_SIMD_BINARY_FUNCTION(hypot)
+KOKKOS_IMPL_SIMD_BINARY_FUNCTION(atan2)
+KOKKOS_IMPL_SIMD_BINARY_FUNCTION(copysign)
+
+#define KOKKOS_IMPL_SIMD_TERNARY_FUNCTION(FUNC)                             \
+  template <class Abi>                                                      \
+  [[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION Experimental::simd<double, Abi> \
+  FUNC(Experimental::simd<double, Abi> const& a,                            \
+       Experimental::simd<double, Abi> const& b,                            \
+       Experimental::simd<double, Abi> const& c) {                          \
+    Experimental::simd<double, Abi> result;                                 \
+    for (std::size_t i = 0; i < Experimental::simd<double, Abi>::size();    \
+         ++i) {                                                             \
+      result[i] = Kokkos::FUNC(a[i], b[i], c[i]);                           \
+    }                                                                       \
+    return result;                                                          \
+  }
+
+KOKKOS_IMPL_SIMD_TERNARY_FUNCTION(fma)
+KOKKOS_IMPL_SIMD_TERNARY_FUNCTION(hypot)
+
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD_Scalar.hpp b/bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD_Scalar.hpp
new file mode 100644 (file)
index 0000000..f0d0669
--- /dev/null
@@ -0,0 +1,353 @@
+/*
+//@HEADER
+// ************************************************************************
+//
+//                        Kokkos v. 3.0
+//       Copyright (2020) National Technology & Engineering
+//               Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of the Corporation nor the names of the
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Questions? Contact Christian R. Trott (crtrott@sandia.gov)
+//
+// ************************************************************************
+//@HEADER
+*/
+
+#ifndef KOKKOS_SIMD_SCALAR_HPP
+#define KOKKOS_SIMD_SCALAR_HPP
+
+#include <type_traits>
+#include <climits>
+#include <cfloat>
+
+#include <Kokkos_SIMD_Common.hpp>
+
+namespace Kokkos {
+namespace Experimental {
+
+namespace simd_abi {
+
+class scalar {};
+
+}  // namespace simd_abi
+
+template <class T>
+class simd_mask<T, simd_abi::scalar> {
+  bool m_value;
+
+ public:
+  using value_type                      = bool;
+  using simd_type                       = simd<T, simd_abi::scalar>;
+  using abi_type                        = simd_abi::scalar;
+  using reference                       = value_type&;
+  KOKKOS_DEFAULTED_FUNCTION simd_mask() = default;
+  KOKKOS_FORCEINLINE_FUNCTION static constexpr std::size_t size() { return 1; }
+  KOKKOS_FORCEINLINE_FUNCTION explicit simd_mask(value_type value)
+      : m_value(value) {}
+  template <class U>
+  KOKKOS_FORCEINLINE_FUNCTION simd_mask(
+      simd_mask<U, simd_abi::scalar> const& other)
+      : m_value(static_cast<bool>(other)) {}
+  KOKKOS_FORCEINLINE_FUNCTION constexpr explicit operator bool() const {
+    return m_value;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION reference operator[](std::size_t) {
+    return m_value;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION value_type operator[](std::size_t) const {
+    return m_value;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION simd_mask
+  operator||(simd_mask const& other) const {
+    return simd_mask(m_value || other.m_value);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION simd_mask
+  operator&&(simd_mask const& other) const {
+    return simd_mask(m_value && other.m_value);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION simd_mask operator!() const {
+    return simd_mask(!m_value);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION bool operator==(simd_mask const& other) const {
+    return m_value == other.m_value;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION bool operator!=(simd_mask const& other) const {
+    return m_value != other.m_value;
+  }
+};
+
+template <class T>
+class simd<T, simd_abi::scalar> {
+  T m_value;
+
+ public:
+  using value_type                            = T;
+  using abi_type                              = simd_abi::scalar;
+  using mask_type                             = simd_mask<T, abi_type>;
+  using reference                             = value_type&;
+  KOKKOS_DEFAULTED_FUNCTION simd()            = default;
+  KOKKOS_DEFAULTED_FUNCTION simd(simd const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION simd(simd&&)      = default;
+  KOKKOS_DEFAULTED_FUNCTION simd& operator=(simd const&) = default;
+  KOKKOS_DEFAULTED_FUNCTION simd& operator=(simd&&) = default;
+  KOKKOS_FORCEINLINE_FUNCTION static constexpr std::size_t size() { return 1; }
+  template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+                                      bool> = false>
+  KOKKOS_FORCEINLINE_FUNCTION simd(U&& value) : m_value(value) {}
+  template <class U, std::enable_if_t<std::is_convertible_v<U, value_type>,
+                                      bool> = false>
+  KOKKOS_FORCEINLINE_FUNCTION explicit simd(simd<U, abi_type> const& other)
+      : m_value(static_cast<U>(other)) {}
+  template <class G,
+            std::enable_if_t<
+                // basically, can you do { value_type r =
+                // gen(std::integral_constant<std::size_t, i>()); }
+                std::is_invocable_r_v<value_type, G,
+                                      std::integral_constant<std::size_t, 0>>,
+                bool> = false>
+  KOKKOS_FORCEINLINE_FUNCTION simd(G&& gen)
+      : m_value(gen(std::integral_constant<std::size_t, 0>())) {}
+  KOKKOS_FORCEINLINE_FUNCTION simd operator-() const { return simd(-m_value); }
+  KOKKOS_FORCEINLINE_FUNCTION simd operator>>(int rhs) const {
+    return simd(m_value >> rhs);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION simd
+  operator>>(simd<int, abi_type> const& rhs) const {
+    return simd(m_value >> static_cast<int>(rhs));
+  }
+  KOKKOS_FORCEINLINE_FUNCTION simd operator<<(int rhs) const {
+    return simd(m_value << rhs);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION simd
+  operator<<(simd<int, abi_type> const& rhs) const {
+    return simd(m_value << static_cast<int>(rhs));
+  }
+  KOKKOS_FORCEINLINE_FUNCTION simd operator&(simd const& other) const {
+    return m_value & other.m_value;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION simd operator|(simd const& other) const {
+    return m_value | other.m_value;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION constexpr explicit operator T() const {
+    return m_value;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION mask_type operator<(simd const& other) const {
+    return mask_type(m_value < other.m_value);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION mask_type operator>(simd const& other) const {
+    return mask_type(m_value > other.m_value);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION mask_type operator<=(simd const& other) const {
+    return mask_type(m_value <= other.m_value);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION mask_type operator>=(simd const& other) const {
+    return mask_type(m_value >= other.m_value);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION mask_type operator==(simd const& other) const {
+    return mask_type(m_value == other.m_value);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION mask_type operator!=(simd const& other) const {
+    return mask_type(m_value != other.m_value);
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void copy_from(T const* ptr,
+                                             element_aligned_tag) {
+    m_value = *ptr;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION void copy_to(T* ptr, element_aligned_tag) const {
+    *ptr = m_value;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION reference operator[](std::size_t) {
+    return m_value;
+  }
+  KOKKOS_FORCEINLINE_FUNCTION value_type operator[](std::size_t) const {
+    return m_value;
+  }
+};
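+
+// Editor's illustration (not part of the bundled sources): a minimal sketch of
+// the scalar Abi, which wraps exactly one lane and serves as the portable
+// reference backend.
+//
+//   using s_t = Kokkos::Experimental::simd<double, Kokkos::Experimental::simd_abi::scalar>;
+//   static_assert(s_t::size() == 1, "scalar Abi has one lane");
+//   double in = 3.0, out = 0.0;
+//   s_t v;
+//   v.copy_from(&in, Kokkos::Experimental::element_aligned_tag());
+//   v.copy_to(&out, Kokkos::Experimental::element_aligned_tag());  // out == 3.0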
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> operator*(
+    simd<T, simd_abi::scalar> const& lhs,
+    simd<T, simd_abi::scalar> const& rhs) {
+  return simd<T, simd_abi::scalar>(static_cast<T>(lhs) * static_cast<T>(rhs));
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> operator/(
+    simd<T, simd_abi::scalar> const& lhs,
+    simd<T, simd_abi::scalar> const& rhs) {
+  return simd<T, simd_abi::scalar>(static_cast<T>(lhs) / static_cast<T>(rhs));
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> operator+(
+    simd<T, simd_abi::scalar> const& lhs,
+    simd<T, simd_abi::scalar> const& rhs) {
+  return simd<T, simd_abi::scalar>(static_cast<T>(lhs) + static_cast<T>(rhs));
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> operator-(
+    simd<T, simd_abi::scalar> const& lhs,
+    simd<T, simd_abi::scalar> const& rhs) {
+  return simd<T, simd_abi::scalar>(static_cast<T>(lhs) - static_cast<T>(rhs));
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> abs(
+    simd<T, simd_abi::scalar> const& a) {
+  return simd<T, simd_abi::scalar>(std::abs(static_cast<T>(a)));
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> sqrt(
+    simd<T, simd_abi::scalar> const& a) {
+  return simd<T, simd_abi::scalar>(std::sqrt(static_cast<T>(a)));
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> fma(
+    simd<T, simd_abi::scalar> const& x, simd<T, simd_abi::scalar> const& y,
+    simd<T, simd_abi::scalar> const& z) {
+  return simd<T, simd_abi::scalar>((static_cast<T>(x) * static_cast<T>(y)) +
+                                   static_cast<T>(z));
+}
+
+template <class T>
+KOKKOS_FORCEINLINE_FUNCTION simd<T, simd_abi::scalar> condition(
+    desul::Impl::dont_deduce_this_parameter_t<
+        simd_mask<T, simd_abi::scalar>> const& a,
+    simd<T, simd_abi::scalar> const& b, simd<T, simd_abi::scalar> const& c) {
+  return simd<T, simd_abi::scalar>(static_cast<bool>(a) ? static_cast<T>(b)
+                                                        : static_cast<T>(c));
+}
+
+template <class T, class Abi>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION simd<T, Abi> copysign(
+    simd<T, Abi> const& a, simd<T, Abi> const& b) {
+  return std::copysign(static_cast<T>(a), static_cast<T>(b));
+}
+
+template <class T>
+class const_where_expression<simd_mask<T, simd_abi::scalar>,
+                             simd<T, simd_abi::scalar>> {
+ public:
+  using abi_type   = simd_abi::scalar;
+  using value_type = simd<T, abi_type>;
+  using mask_type  = simd_mask<T, abi_type>;
+
+ protected:
+  value_type& m_value;
+  mask_type const& m_mask;
+
+ public:
+  KOKKOS_FORCEINLINE_FUNCTION
+  const_where_expression(mask_type const& mask_arg, value_type const& value_arg)
+      : m_value(const_cast<value_type&>(value_arg)), m_mask(mask_arg) {}
+  KOKKOS_FORCEINLINE_FUNCTION
+  mask_type const& mask() const { return m_mask; }
+  KOKKOS_FORCEINLINE_FUNCTION
+  value_type const& value() const { return m_value; }
+  KOKKOS_FORCEINLINE_FUNCTION
+  void copy_to(T* mem, element_aligned_tag) const {
+    if (static_cast<bool>(m_mask)) *mem = static_cast<T>(m_value);
+  }
+  template <class Integral>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<std::is_integral_v<Integral>>
+  scatter_to(T* mem, simd<Integral, simd_abi::scalar> const& index) const {
+    if (static_cast<bool>(m_mask))
+      mem[static_cast<Integral>(index)] = static_cast<T>(m_value);
+  }
+};
+
+template <class T>
+class where_expression<simd_mask<T, simd_abi::scalar>,
+                       simd<T, simd_abi::scalar>>
+    : public const_where_expression<simd_mask<T, simd_abi::scalar>,
+                                    simd<T, simd_abi::scalar>> {
+  using base_type = const_where_expression<simd_mask<T, simd_abi::scalar>,
+                                           simd<T, simd_abi::scalar>>;
+
+ public:
+  using typename base_type::value_type;
+  KOKKOS_FORCEINLINE_FUNCTION
+  where_expression(simd_mask<T, simd_abi::scalar> const& mask_arg,
+                   simd<T, simd_abi::scalar>& value_arg)
+      : base_type(mask_arg, value_arg) {}
+  KOKKOS_FORCEINLINE_FUNCTION
+  void copy_from(T const* mem, element_aligned_tag) {
+    if (static_cast<bool>(this->m_mask)) this->m_value = *mem;
+  }
+  template <class Integral>
+  KOKKOS_FORCEINLINE_FUNCTION std::enable_if_t<std::is_integral_v<Integral>>
+  gather_from(T const* mem, simd<Integral, simd_abi::scalar> const& index) {
+    if (static_cast<bool>(this->m_mask))
+      this->m_value = mem[static_cast<Integral>(index)];
+  }
+  template <class U, std::enable_if_t<
+                         std::is_convertible_v<U, simd<T, simd_abi::scalar>>,
+                         bool> = false>
+  KOKKOS_FORCEINLINE_FUNCTION void operator=(U&& x) {
+    if (static_cast<bool>(this->m_mask))
+      this->m_value =
+          static_cast<simd<T, simd_abi::scalar>>(std::forward<U>(x));
+  }
+};
+
+template <class T, class BinaryOp>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION T
+reduce(const_where_expression<simd_mask<T, simd_abi::scalar>,
+                              simd<T, simd_abi::scalar>> const& x,
+       T identity_element, BinaryOp) {
+  return static_cast<bool>(x.mask()) ? static_cast<T>(x.value())
+                                     : identity_element;
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION T
+hmax(const_where_expression<simd_mask<T, simd_abi::scalar>,
+                            simd<T, simd_abi::scalar>> const& x) {
+  return static_cast<bool>(x.mask()) ? static_cast<T>(x.value())
+                                     : Kokkos::reduction_identity<T>::max();
+}
+
+template <class T>
+[[nodiscard]] KOKKOS_FORCEINLINE_FUNCTION T
+hmin(const_where_expression<simd_mask<T, simd_abi::scalar>,
+                            simd<T, simd_abi::scalar>> const& x) {
+  return static_cast<bool>(x.mask()) ? static_cast<T>(x.value())
+                                     : Kokkos::reduction_identity<T>::min();
+}
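+
+// Editor's illustration (not part of the bundled sources): a sketch of the
+// scalar-Abi masked reductions above, which degenerate to a select between
+// the single lane and the reduction identity.
+//
+//   using s_t = Kokkos::Experimental::simd<double, Kokkos::Experimental::simd_abi::scalar>;
+//   s_t v(5.0);
+//   double a = hmax(where(s_t::mask_type(true), v));   // 5.0
+//   double b = hmax(where(s_t::mask_type(false), v));  // reduction_identity<double>::max()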
+
+}  // namespace Experimental
+}  // namespace Kokkos
+
+#endif
diff --git a/bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD_dummy.cpp b/bundled/kokkos-3.7.00/simd/src/Kokkos_SIMD_dummy.cpp
new file mode 100644 (file)
index 0000000..d273cb6
--- /dev/null
@@ -0,0 +1,7 @@
+// This file is needed in order to get the linker language
+// for the header-only submodule.
+// While we set the language properties in our normal CMake
+// path, they do not get set in the Trilinos environment.
+// Furthermore, setting LINKER_LANGUAGE is only supported
+// in CMake 3.19 and up.
+void KOKKOS_SIMD_SRC_DUMMY_PREVENT_LINK_ERROR() {}
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/.clang-format b/bundled/kokkos-3.7.00/tpls/desul/include/desul/.clang-format
new file mode 100644 (file)
index 0000000..7968b43
--- /dev/null
@@ -0,0 +1,3 @@
+DisableFormat: true
+SortIncludes: false
+
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics.hpp
new file mode 100644 (file)
index 0000000..8ba5b0f
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_HPP_
+#define DESUL_ATOMICS_HPP_
+
+#include "desul/atomics/Atomic_Ref.hpp"
+#include "desul/atomics/Compare_Exchange.hpp"
+#include "desul/atomics/Generic.hpp"
+#include "desul/atomics/Lock_Array.hpp"
+#include "desul/atomics/Macros.hpp"
+
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Atomic_Ref.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Atomic_Ref.hpp
new file mode 100644 (file)
index 0000000..fbf2dcf
--- /dev/null
@@ -0,0 +1,541 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMIC_REF_IMPL_HPP_
+#define DESUL_ATOMIC_REF_IMPL_HPP_
+
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+
+#include "desul/atomics/Common.hpp"
+#include "desul/atomics/Generic.hpp"
+#include "desul/atomics/Macros.hpp"
+
+namespace desul {
+namespace Impl {
+
+// TODO current implementation is missing the following:
+// * member functions
+//   * wait
+//   * notify_one
+//   * notify_all
+
+template <typename T,
+          typename MemoryOrder,
+          typename MemoryScope,
+          bool = std::is_integral<T>{},
+          bool = std::is_floating_point<T>{}>
+struct basic_atomic_ref;
+
+// base class for non-integral, non-floating-point, non-pointer types
+template <typename T, typename MemoryOrder, typename MemoryScope>
+struct basic_atomic_ref<T, MemoryOrder, MemoryScope, false, false> {
+  static_assert(std::is_trivially_copyable<T>{}, "");
+
+ private:
+  T* _ptr;
+
+  // 1/2/4/8/16-byte types must be aligned to at least their size
+  static constexpr int _min_alignment = (sizeof(T) & (sizeof(T) - 1)) || sizeof(T) > 16
+                                            ? 0
+                                            : sizeof(T);
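+  // e.g. sizeof(T) ==  8 -> _min_alignment ==  8  (power of two, <= 16 bytes)
+  //      sizeof(T) == 12 -> _min_alignment ==  0  (12 & 11 != 0, not a power of two)
+  //      sizeof(T) == 32 -> _min_alignment ==  0  (larger than 16 bytes)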
+
+ public:
+  using value_type = T;
+
+  static constexpr bool is_always_lock_free = atomic_always_lock_free(sizeof(T));
+
+  static constexpr std::size_t required_alignment = _min_alignment > alignof(T)
+                                                        ? _min_alignment
+                                                        : alignof(T);
+
+  basic_atomic_ref() = delete;
+  basic_atomic_ref& operator=(basic_atomic_ref const&) = delete;
+
+  basic_atomic_ref(basic_atomic_ref const&) = default;
+
+  explicit basic_atomic_ref(T& obj) : _ptr(std::addressof(obj)) {}
+
+  T operator=(T desired) const noexcept {
+    this->store(desired);
+    return desired;
+  }
+
+  operator T() const noexcept { return this->load(); }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION void store(T desired,
+                            _MemoryOrder order = _MemoryOrder()) const noexcept {
+    atomic_store(_ptr, desired, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION T load(_MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_load(_ptr, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION T exchange(T desired,
+                            _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_exchange(_ptr, desired, order, MemoryScope());
+  }
+
+  DESUL_FUNCTION bool is_lock_free() const noexcept {
+    return atomic_is_lock_free<sizeof(T), required_alignment>();
+  }
+
+  template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_weak(T& expected,
+                                            T desired,
+                                            SuccessMemoryOrder success,
+                                            FailureMemoryOrder failure) const noexcept {
+    return atomic_compare_exchange_weak(
+        _ptr, expected, desired, success, failure, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_weak(
+      T& expected, T desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return compare_exchange_weak(expected,
+                                 desired,
+                                 order,
+                                 cmpexch_failure_memory_order<_MemoryOrder>());
+  }
+
+  template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_strong(
+      T& expected,
+      T desired,
+      SuccessMemoryOrder success,
+      FailureMemoryOrder failure) const noexcept {
+    return atomic_compare_exchange_strong(
+        _ptr, expected, desired, success, failure, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_strong(
+      T& expected, T desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return compare_exchange_strong(expected,
+                                   desired,
+                                   order,
+                                   cmpexch_failure_memory_order<_MemoryOrder>());
+  }
+};
+
+// base class for atomic_ref<integral-type>
+template <typename T, typename MemoryOrder, typename MemoryScope>
+struct basic_atomic_ref<T, MemoryOrder, MemoryScope, true, false> {
+  static_assert(std::is_integral<T>{}, "");
+
+ private:
+  T* _ptr;
+
+ public:
+  using value_type = T;
+  using difference_type = value_type;
+
+  static constexpr bool is_always_lock_free = atomic_always_lock_free(sizeof(T));
+
+  static constexpr std::size_t required_alignment = sizeof(T) > alignof(T) ? sizeof(T)
+                                                                           : alignof(T);
+
+  basic_atomic_ref() = delete;
+  basic_atomic_ref& operator=(basic_atomic_ref const&) = delete;
+
+  explicit basic_atomic_ref(T& obj) : _ptr(&obj) {}
+
+  basic_atomic_ref(basic_atomic_ref const&) = default;
+
+  T operator=(T desired) const noexcept {
+    this->store(desired);
+    return desired;
+  }
+
+  operator T() const noexcept { return this->load(); }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION void store(T desired,
+                            _MemoryOrder order = _MemoryOrder()) const noexcept {
+    atomic_store(_ptr, desired, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION T load(_MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_load(_ptr, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION T exchange(T desired,
+                            _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_exchange(_ptr, desired, order, MemoryScope());
+  }
+
+  DESUL_FUNCTION bool is_lock_free() const noexcept {
+    return atomic_is_lock_free<sizeof(T), required_alignment>();
+  }
+
+  template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_weak(T& expected,
+                                            T desired,
+                                            SuccessMemoryOrder success,
+                                            FailureMemoryOrder failure) const noexcept {
+    return atomic_compare_exchange_weak(
+        _ptr, expected, desired, success, failure, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_weak(
+      T& expected, T desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return compare_exchange_weak(expected,
+                                 desired,
+                                 order,
+                                 cmpexch_failure_memory_order<_MemoryOrder>());
+  }
+
+  template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_strong(
+      T& expected,
+      T desired,
+      SuccessMemoryOrder success,
+      FailureMemoryOrder failure) const noexcept {
+    return atomic_compare_exchange_strong(
+        _ptr, expected, desired, success, failure, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_strong(
+      T& expected, T desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return compare_exchange_strong(expected,
+                                   desired,
+                                   order,
+                                   cmpexch_failure_memory_order<_MemoryOrder>());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION value_type
+  fetch_add(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_fetch_add(_ptr, arg, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION value_type
+  fetch_sub(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_fetch_sub(_ptr, arg, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION value_type
+  fetch_and(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_fetch_and(_ptr, arg, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION value_type
+  fetch_or(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_fetch_or(_ptr, arg, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION value_type
+  fetch_xor(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_fetch_xor(_ptr, arg, order, MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator++() const noexcept {
+    return atomic_add_fetch(_ptr, value_type(1), MemoryOrder(), MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator++(int) const noexcept { return fetch_add(1); }
+
+  DESUL_FUNCTION value_type operator--() const noexcept {
+    return atomic_sub_fetch(_ptr, value_type(1), MemoryOrder(), MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator--(int) const noexcept { return fetch_sub(1); }
+
+  DESUL_FUNCTION value_type operator+=(value_type arg) const noexcept {
+    return atomic_add_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator-=(value_type arg) const noexcept {
+    return atomic_sub_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator&=(value_type arg) const noexcept {
+    return atomic_and_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator|=(value_type arg) const noexcept {
+    return atomic_or_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator^=(value_type arg) const noexcept {
+    return atomic_xor_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
+  }
+};
+
+// base class for atomic_ref<floating-point-type>
+template <typename T, typename MemoryOrder, typename MemoryScope>
+struct basic_atomic_ref<T, MemoryOrder, MemoryScope, false, true> {
+  static_assert(std::is_floating_point<T>{}, "");
+
+ private:
+  T* _ptr;
+
+ public:
+  using value_type = T;
+  using difference_type = value_type;
+
+  static constexpr bool is_always_lock_free = atomic_always_lock_free(sizeof(T));
+
+  static constexpr std::size_t required_alignment = alignof(T);
+
+  basic_atomic_ref() = delete;
+  basic_atomic_ref& operator=(basic_atomic_ref const&) = delete;
+
+  explicit basic_atomic_ref(T& obj) : _ptr(&obj) {}
+
+  basic_atomic_ref(basic_atomic_ref const&) = default;
+
+  T operator=(T desired) const noexcept {
+    this->store(desired);
+    return desired;
+  }
+
+  operator T() const noexcept { return this->load(); }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION void store(T desired,
+                            _MemoryOrder order = _MemoryOrder()) const noexcept {
+    atomic_store(_ptr, desired, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION T load(_MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_load(_ptr, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION T exchange(T desired,
+                            _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_exchange(_ptr, desired, order, MemoryScope());
+  }
+
+  DESUL_FUNCTION bool is_lock_free() const noexcept {
+    return atomic_is_lock_free<sizeof(T), required_alignment>();
+  }
+
+  template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_weak(T& expected,
+                                            T desired,
+                                            SuccessMemoryOrder success,
+                                            FailureMemoryOrder failure) const noexcept {
+    return atomic_compare_exchange_weak(
+        _ptr, expected, desired, success, failure, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_weak(
+      T& expected, T desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return compare_exchange_weak(expected,
+                                 desired,
+                                 order,
+                                 cmpexch_failure_memory_order<_MemoryOrder>());
+  }
+
+  template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_strong(
+      T& expected,
+      T desired,
+      SuccessMemoryOrder success,
+      FailureMemoryOrder failure) const noexcept {
+    return atomic_compare_exchange_strong(
+        _ptr, expected, desired, success, failure, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_strong(
+      T& expected, T desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return compare_exchange_strong(expected,
+                                   desired,
+                                   order,
+                                   cmpexch_failure_memory_order<_MemoryOrder>());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION value_type
+  fetch_add(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_fetch_add(_ptr, arg, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION value_type
+  fetch_sub(value_type arg, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_fetch_sub(_ptr, arg, order, MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator+=(value_type arg) const noexcept {
+    return atomic_add_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator-=(value_type arg) const noexcept {
+    return atomic_sub_fetch(_ptr, arg, MemoryOrder(), MemoryScope());
+  }
+};
+
+// base class for atomic_ref<pointer-type>
+template <typename T, typename MemoryOrder, typename MemoryScope>
+struct basic_atomic_ref<T*, MemoryOrder, MemoryScope, false, false> {
+ private:
+  T** _ptr;
+
+ public:
+  using value_type = T*;
+  using difference_type = std::ptrdiff_t;
+
+  static constexpr bool is_always_lock_free = atomic_always_lock_free(sizeof(T*));
+
+  static constexpr std::size_t required_alignment = alignof(T*);
+
+  basic_atomic_ref() = delete;
+  basic_atomic_ref& operator=(basic_atomic_ref const&) = delete;
+
+  explicit basic_atomic_ref(T*& arg) : _ptr(std::addressof(arg)) {}
+
+  basic_atomic_ref(basic_atomic_ref const&) = default;
+
+  T* operator=(T* desired) const noexcept {
+    this->store(desired);
+    return desired;
+  }
+
+  operator T*() const noexcept { return this->load(); }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION void store(T* desired,
+                            _MemoryOrder order = _MemoryOrder()) const noexcept {
+    atomic_store(_ptr, desired, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION T* load(_MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_load(_ptr, order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION T* exchange(T* desired,
+                             _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_exchange(_ptr, desired, order, MemoryScope());
+  }
+
+  DESUL_FUNCTION bool is_lock_free() const noexcept {
+    return atomic_is_lock_free<sizeof(T*), required_alignment>();
+  }
+
+  template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_weak(T*& expected,
+                                            T* desired,
+                                            SuccessMemoryOrder success,
+                                            FailureMemoryOrder failure) const noexcept {
+    return atomic_compare_exchange_weak(
+        _ptr, expected, desired, success, failure, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_weak(
+      T*& expected, T* desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return compare_exchange_weak(expected,
+                                 desired,
+                                 order,
+                                 cmpexch_failure_memory_order<_MemoryOrder>());
+  }
+
+  template <typename SuccessMemoryOrder, typename FailureMemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_strong(
+      T*& expected,
+      T* desired,
+      SuccessMemoryOrder success,
+      FailureMemoryOrder failure) const noexcept {
+    return atomic_compare_exchange_strong(
+        _ptr, expected, desired, success, failure, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION bool compare_exchange_strong(
+      T*& expected, T* desired, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return compare_exchange_strong(expected,
+                                   desired,
+                                   order,
+                                   cmpexch_failure_memory_order<_MemoryOrder>());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION value_type
+  fetch_add(difference_type d, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_fetch_add(_ptr, _type_size(d), order, MemoryScope());
+  }
+
+  template <typename _MemoryOrder = MemoryOrder>
+  DESUL_FUNCTION value_type
+  fetch_sub(difference_type d, _MemoryOrder order = _MemoryOrder()) const noexcept {
+    return atomic_fetch_sub(_ptr, _type_size(d), order, MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator++() const noexcept {
+    return atomic_add_fetch(_ptr, _type_size(1), MemoryOrder(), MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator++(int) const noexcept { return fetch_add(1); }
+
+  DESUL_FUNCTION value_type operator--() const noexcept {
+    return atomic_sub_fetch(_ptr, _type_size(1), MemoryOrder(), MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator--(int) const noexcept { return fetch_sub(1); }
+
+  DESUL_FUNCTION value_type operator+=(difference_type d) const noexcept {
+    return atomic_add_fetch(_ptr, _type_size(d), MemoryOrder(), MemoryScope());
+  }
+
+  DESUL_FUNCTION value_type operator-=(difference_type d) const noexcept {
+    return atomic_sub_fetch(_ptr, _type_size(d), MemoryOrder(), MemoryScope());
+  }
+
+ private:
+  static constexpr std::ptrdiff_t _type_size(std::ptrdiff_t d) noexcept {
+    static_assert(std::is_object<T>{}, "");
+    return d * sizeof(T);
+  }
+};
+
+}  // namespace Impl
+
+template <typename T, typename MemoryOrder, typename MemoryScope>
+struct scoped_atomic_ref : Impl::basic_atomic_ref<T, MemoryOrder, MemoryScope> {
+  explicit scoped_atomic_ref(T& obj) noexcept
+      : Impl::basic_atomic_ref<T, MemoryOrder, MemoryScope>(obj) {}
+
+  scoped_atomic_ref& operator=(scoped_atomic_ref const&) = delete;
+
+  scoped_atomic_ref(scoped_atomic_ref const&) = default;
+
+  using Impl::basic_atomic_ref<T, MemoryOrder, MemoryScope>::operator=;
+};
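+
+// Editor's illustration (not part of the bundled sources): a minimal usage
+// sketch. Unlike std::atomic_ref, the memory order and scope are part of the
+// type rather than per-call arguments.
+//
+//   int counter = 0;
+//   desul::scoped_atomic_ref<int, desul::MemoryOrderRelaxed,
+//                            desul::MemoryScopeDevice>
+//       ref(counter);
+//   ref.fetch_add(1);    // relaxed, device-scope atomic increment
+//   int snap = ref;      // atomic load through the conversion operator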
+
+}  // namespace desul
+
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/CUDA.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/CUDA.hpp
new file mode 100644 (file)
index 0000000..87c0df4
--- /dev/null
@@ -0,0 +1,664 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+#ifndef DESUL_ATOMICS_CUDA_HPP_
+#define DESUL_ATOMICS_CUDA_HPP_
+
+#ifdef DESUL_HAVE_CUDA_ATOMICS
+// When building with Clang we always need to include the device functions, since Clang
+// must see a consistent overload set in both device and host compilation. That
+// means we need to know on the host what to make visible, i.e. we need host-side
+// compile-time knowledge of the architecture.
+#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)) || \
+    (!defined(__NVCC__) && !defined(DESUL_CUDA_ARCH_IS_PRE_VOLTA))
+#define DESUL_HAVE_CUDA_ATOMICS_ASM
+#include <desul/atomics/cuda/CUDA_asm.hpp>
+#endif
+
+#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)) || \
+    (!defined(__NVCC__) && !defined(DESUL_HAVE_CUDA_ATOMICS_ASM))
+namespace desul {
+namespace Impl {
+template <class T>
+struct is_cuda_atomic_integer_type {
+  static constexpr bool value = std::is_same<T, int>::value ||
+                                std::is_same<T, unsigned int>::value ||
+                                std::is_same<T, unsigned long long int>::value;
+};
+
+template <class T>
+struct is_cuda_atomic_add_type {
+  static constexpr bool value = is_cuda_atomic_integer_type<T>::value ||
+#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
+                                std::is_same<T, double>::value ||
+#endif
+                                std::is_same<T, float>::value;
+};
+
+template <class T>
+struct is_cuda_atomic_sub_type {
+  static constexpr bool value =
+      std::is_same<T, int>::value || std::is_same<T, unsigned int>::value;
+};
+}  // namespace Impl
+
+// Atomic Add
+template <class T>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_add_type<T>::value, T>
+    atomic_fetch_add(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+  return atomicAdd(dest, val);
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_add_type<T>::value, T>
+    atomic_fetch_add(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
+  __threadfence();
+  T return_val = atomicAdd(dest, val);
+  __threadfence();
+  return return_val;
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_add_type<T>::value, T>
+    atomic_fetch_add(T* dest, T val, MemoryOrder, MemoryScopeCore) {
+  return atomic_fetch_add(dest, val, MemoryOrder(), MemoryScopeDevice());
+}
+
+// Atomic Sub
+template <class T>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_sub_type<T>::value, T>
+    atomic_fetch_sub(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+  return atomicSub(dest, val);
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_sub_type<T>::value, T>
+    atomic_fetch_sub(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
+  __threadfence();
+  T return_val = atomicSub(dest, val);
+  __threadfence();
+  return return_val;
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_sub_type<T>::value, T>
+    atomic_fetch_sub(T* dest, T val, MemoryOrder, MemoryScopeCore) {
+  return atomic_fetch_sub(dest, val, MemoryOrder(), MemoryScopeDevice());
+}
+
+// Wrap around atomic add
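+// (atomicInc computes old >= val ? 0 : old + 1, i.e. an increment that wraps
+// back to zero once the counter reaches val)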
+__device__ inline unsigned int atomic_fetch_inc_mod(unsigned int* dest,
+                                                    unsigned int val,
+                                                    MemoryOrderRelaxed,
+                                                    MemoryScopeDevice) {
+  return atomicInc(dest, val);
+}
+
+template <typename MemoryOrder>
+__device__ inline unsigned int atomic_fetch_inc_mod(unsigned int* dest,
+                                                    unsigned int val,
+                                                    MemoryOrder,
+                                                    MemoryScopeDevice) {
+  __threadfence();
+  unsigned int return_val = atomicInc(dest, val);
+  __threadfence();
+  return return_val;
+}
+
+template <typename MemoryOrder>
+__device__ inline unsigned int atomic_fetch_inc_mod(unsigned int* dest,
+                                                    unsigned int val,
+                                                    MemoryOrder,
+                                                    MemoryScopeCore) {
+  return atomic_fetch_inc_mod(dest, val, MemoryOrder(), MemoryScopeDevice());
+}
+
+// Wrap around atomic sub
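+// (atomicDec computes (old == 0 || old > val) ? val : old - 1, i.e. a
+// decrement that wraps back to val)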
+__device__ inline unsigned int atomic_fetch_dec_mod(unsigned int* dest,
+                                                    unsigned int val,
+                                                    MemoryOrderRelaxed,
+                                                    MemoryScopeDevice) {
+  return atomicDec(dest, val);
+}
+
+template <typename MemoryOrder>
+__device__ inline unsigned int atomic_fetch_dec_mod(unsigned int* dest,
+                                                    unsigned int val,
+                                                    MemoryOrder,
+                                                    MemoryScopeDevice) {
+  __threadfence();
+  unsigned int return_val = atomicDec(dest, val);
+  __threadfence();
+  return return_val;
+}
+
+template <typename MemoryOrder>
+__device__ inline unsigned int atomic_fetch_dec_mod(unsigned int* dest,
+                                                    unsigned int val,
+                                                    MemoryOrder,
+                                                    MemoryScopeCore) {
+  return atomic_fetch_dec_mod(dest, val, MemoryOrder(), MemoryScopeDevice());
+}
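+
+// Illustrative usage sketch (not part of the upstream sources): the *_mod
+// overloads mirror CUDA's atomicInc()/atomicDec(), i.e. the stored value
+// wraps around once it passes `val`. A hypothetical ring-buffer index of
+// capacity `n` can be advanced with:
+//
+//   __device__ unsigned int next_slot(unsigned int* head, unsigned int n) {
+//     // returns the old index; *head cycles through 0, 1, ..., n-1, 0, ...
+//     return desul::atomic_fetch_inc_mod(
+//         head, n - 1u, desul::MemoryOrderRelaxed(), desul::MemoryScopeDevice());
+//   }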
+
+// Atomic Inc
+template <typename T>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_add_type<T>::value, T>
+    atomic_fetch_inc(T* dest, MemoryOrderRelaxed, MemoryScopeDevice) {
+  return atomicAdd(dest, T(1));
+}
+
+template <typename T, typename MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_add_type<T>::value, T>
+    atomic_fetch_inc(T* dest, MemoryOrder, MemoryScopeDevice) {
+  __threadfence();
+  T return_val = atomicAdd(dest, T(1));
+  __threadfence();
+
+  return return_val;
+}
+
+template <typename T, typename MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_add_type<T>::value, T>
+    atomic_fetch_inc(T* dest, MemoryOrder, MemoryScopeCore) {
+  return atomic_fetch_add(dest, T(1), MemoryOrder(), MemoryScopeDevice());
+}
+
+// Atomic Dec
+template <typename T>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_sub_type<T>::value, T>
+    atomic_fetch_dec(T* dest, MemoryOrderRelaxed, MemoryScopeDevice) {
+  return atomicSub(dest, T(1));
+}
+
+template <typename T, typename MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_sub_type<T>::value, T>
+    atomic_fetch_dec(T* dest, MemoryOrder, MemoryScopeDevice) {
+  __threadfence();
+  T return_val = atomicSub(dest, T(1));
+  __threadfence();
+  return return_val;
+}
+
+template <typename T, typename MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_sub_type<T>::value, T>
+    atomic_fetch_dec(T* dest, MemoryOrder, MemoryScopeCore) {
+  return atomic_fetch_sub(dest, T(1), MemoryOrder(), MemoryScopeDevice());
+}
+
+// Atomic Max
+template <class T>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_max(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+  return atomicMax(dest, val);
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_max(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
+  __threadfence();
+  T return_val = atomicMax(dest, val);
+  __threadfence();
+  return return_val;
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_max(T* dest, T val, MemoryOrder, MemoryScopeCore) {
+  return atomic_fetch_max(dest, val, MemoryOrder(), MemoryScopeDevice());
+}
+
+// Atomic Min
+template <class T>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_min(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+  return atomicMin(dest, val);
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_min(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
+  __threadfence();
+  T return_val = atomicMin(dest, val);
+  __threadfence();
+  return return_val;
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_min(T* dest, T val, MemoryOrder, MemoryScopeCore) {
+  return atomic_fetch_min(dest, val, MemoryOrder(), MemoryScopeDevice());
+}
+
+// Atomic And
+template <class T>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_and(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+  return atomicAnd(dest, val);
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_and(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
+  __threadfence();
+  T return_val = atomicAnd(dest, val);
+  __threadfence();
+  return return_val;
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_and(T* dest, T val, MemoryOrder, MemoryScopeCore) {
+  return atomic_fetch_and(dest, val, MemoryOrder(), MemoryScopeDevice());
+}
+
+// Atomic XOR
+template <class T>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_xor(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+  return atomicXor(dest, val);
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_xor(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
+  __threadfence();
+  T return_val = atomicXor(dest, val);
+  __threadfence();
+  return return_val;
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_xor(T* dest, T val, MemoryOrder, MemoryScopeCore) {
+  return atomic_fetch_xor(dest, val, MemoryOrder(), MemoryScopeDevice());
+}
+
+// Atomic OR
+template <class T>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_or(T* dest, T val, MemoryOrderRelaxed, MemoryScopeDevice) {
+  return atomicOr(dest, val);
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_or(T* dest, T val, MemoryOrder, MemoryScopeDevice) {
+  __threadfence();
+  T return_val = atomicOr(dest, val);
+  __threadfence();
+  return return_val;
+}
+
+template <class T, class MemoryOrder>
+__device__ inline
+    std::enable_if_t<Impl::is_cuda_atomic_integer_type<T>::value, T>
+    atomic_fetch_or(T* dest, T val, MemoryOrder, MemoryScopeCore) {
+  return atomic_fetch_or(dest, val, MemoryOrder(), MemoryScopeDevice());
+}
+}  // namespace desul
+#endif
+
+#if !defined(__NVCC__)
+// Functions defined as device functions in CUDA that don't exist in the GCC
+// overload set
+namespace desul {
+
+#if defined(DESUL_HAVE_CUDA_ATOMICS_ASM)
+#define DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(TYPE, ORDER, SCOPE)                      \
+  inline void atomic_add(TYPE* const dest, TYPE val, ORDER order, SCOPE scope) { \
+    (void)atomic_fetch_add(dest, val, order, scope);                             \
+  }
+DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(int32_t, MemoryOrderRelaxed, MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(long,
+                                MemoryOrderRelaxed,
+                                MemoryScopeDevice);  // only for ASM?
+DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(unsigned int, MemoryOrderRelaxed, MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(unsigned long long,
+                                MemoryOrderRelaxed,
+                                MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(float, MemoryOrderRelaxed, MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_ADD(double, MemoryOrderRelaxed, MemoryScopeDevice);
+
+#define DESUL_IMPL_CUDA_HOST_ATOMIC_SUB(TYPE, ORDER, SCOPE)                      \
+  inline void atomic_sub(TYPE* const dest, TYPE val, ORDER order, SCOPE scope) { \
+    (void)atomic_fetch_sub(dest, val, order, scope);                             \
+  }
+DESUL_IMPL_CUDA_HOST_ATOMIC_SUB(int32_t, MemoryOrderRelaxed, MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_SUB(long,
+                                MemoryOrderRelaxed,
+                                MemoryScopeDevice);  // only for ASM?
+DESUL_IMPL_CUDA_HOST_ATOMIC_SUB(unsigned int, MemoryOrderRelaxed, MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_SUB(float, MemoryOrderRelaxed, MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_SUB(double, MemoryOrderRelaxed, MemoryScopeDevice);
+
+#define DESUL_IMPL_CUDA_HOST_ATOMIC_INC(TYPE, ORDER, SCOPE)            \
+  inline void atomic_inc(TYPE* const dest, ORDER order, SCOPE scope) { \
+    (void)atomic_fetch_inc(dest, order, scope);                        \
+  }
+DESUL_IMPL_CUDA_HOST_ATOMIC_INC(unsigned int,
+                                MemoryOrderRelaxed,
+                                MemoryScopeDevice);  // only for ASM?
+
+#define DESUL_IMPL_CUDA_HOST_ATOMIC_DEC(TYPE, ORDER, SCOPE)            \
+  inline void atomic_dec(TYPE* const dest, ORDER order, SCOPE scope) { \
+    (void)atomic_fetch_dec(dest, order, scope);                        \
+  }
+DESUL_IMPL_CUDA_HOST_ATOMIC_DEC(unsigned,
+                                MemoryOrderRelaxed,
+                                MemoryScopeDevice);  // only for ASM?
+
+#endif  // DESUL_HAVE_CUDA_ATOMICS_ASM
+
+#define DESUL_IMPL_CUDA_HOST_ATOMIC_INC_MOD(TYPE, ORDER, SCOPE)                      \
+  inline TYPE atomic_fetch_inc_mod(TYPE* dest, TYPE val, ORDER order, SCOPE scope) { \
+    using cas_t = typename Impl::atomic_compare_exchange_type<sizeof(TYPE)>::type;   \
+    cas_t oldval = reinterpret_cast<cas_t&>(*dest);                                  \
+    cas_t assume = oldval;                                                           \
+    do {                                                                             \
+      assume = oldval;                                                               \
+      TYPE newval = (reinterpret_cast<TYPE&>(assume) >= val)                         \
+                        ? static_cast<TYPE>(0)                                       \
+                        : reinterpret_cast<TYPE&>(assume) + static_cast<TYPE>(1);    \
+      oldval = desul::atomic_compare_exchange(reinterpret_cast<cas_t*>(dest),        \
+                                              assume,                                \
+                                              reinterpret_cast<cas_t&>(newval),      \
+                                              order,                                 \
+                                              scope);                                \
+    } while (assume != oldval);                                                      \
+    return reinterpret_cast<TYPE&>(oldval);                                          \
+  }
+DESUL_IMPL_CUDA_HOST_ATOMIC_INC_MOD(unsigned int,
+                                    MemoryOrderRelaxed,
+                                    MemoryScopeDevice);
+#define DESUL_IMPL_CUDA_HOST_ATOMIC_DEC_MOD(TYPE, ORDER, SCOPE)                      \
+  inline TYPE atomic_fetch_dec_mod(TYPE* dest, TYPE val, ORDER order, SCOPE scope) { \
+    using cas_t = typename Impl::atomic_compare_exchange_type<sizeof(TYPE)>::type;   \
+    cas_t oldval = reinterpret_cast<cas_t&>(*dest);                                  \
+    cas_t assume = oldval;                                                           \
+    do {                                                                             \
+      assume = oldval;                                                               \
+      TYPE newval = ((reinterpret_cast<TYPE&>(assume) == static_cast<TYPE>(0)) |     \
+                     (reinterpret_cast<TYPE&>(assume) > val))                        \
+                        ? val                                                        \
+                        : reinterpret_cast<TYPE&>(assume) - static_cast<TYPE>(1);    \
+      oldval = desul::atomic_compare_exchange(reinterpret_cast<cas_t*>(dest),        \
+                                              assume,                                \
+                                              reinterpret_cast<cas_t&>(newval),      \
+                                              order,                                 \
+                                              scope);                                \
+    } while (assume != oldval);                                                      \
+    return reinterpret_cast<TYPE&>(oldval);                                          \
+  }
+DESUL_IMPL_CUDA_HOST_ATOMIC_DEC_MOD(unsigned int,
+                                    MemoryOrderRelaxed,
+                                    MemoryScopeDevice);
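+
+// The two macros above implement the wrap-around update on the host with a
+// classic compare-and-swap loop. A minimal sketch of the same pattern,
+// illustrative only and without the type-punning through cas_t:
+//
+//   inline unsigned int fetch_inc_mod_sketch(unsigned int* dest, unsigned int val) {
+//     unsigned int oldval = *dest;
+//     unsigned int assume;
+//     do {
+//       assume = oldval;
+//       unsigned int newval = (assume >= val) ? 0u : assume + 1u;
+//       oldval = desul::atomic_compare_exchange(dest, assume, newval,
+//                                               desul::MemoryOrderRelaxed(),
+//                                               desul::MemoryScopeDevice());
+//     } while (assume != oldval);  // retry if another thread intervened
+//     return oldval;
+//   }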
+
+#define DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_ADD(TYPE, ORDER, SCOPE)                      \
+  inline TYPE atomic_fetch_add(TYPE* const dest, TYPE val, ORDER order, SCOPE scope) { \
+    return Impl::atomic_fetch_oper(                                                    \
+        Impl::AddOper<TYPE, const TYPE>(), dest, val, order, scope);                   \
+  }
+DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_ADD(float, MemoryOrderRelaxed, MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_ADD(double, MemoryOrderRelaxed, MemoryScopeDevice);
+
+#define DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_SUB(TYPE, ORDER, SCOPE)                      \
+  inline TYPE atomic_fetch_sub(TYPE* const dest, TYPE val, ORDER order, SCOPE scope) { \
+    return Impl::atomic_fetch_oper(                                                    \
+        Impl::SubOper<TYPE, const TYPE>(), dest, val, order, scope);                   \
+  }
+DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_SUB(float, MemoryOrderRelaxed, MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_SUB(double, MemoryOrderRelaxed, MemoryScopeDevice);
+
+#define DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MAX(TYPE, ORDER, SCOPE)                      \
+  inline TYPE atomic_fetch_max(TYPE* const dest, TYPE val, ORDER order, SCOPE scope) { \
+    return Impl::atomic_fetch_oper(                                                    \
+        Impl::MaxOper<TYPE, const TYPE>(), dest, val, order, scope);                   \
+  }
+DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MAX(int, MemoryOrderRelaxed, MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MAX(long,
+                                      MemoryOrderRelaxed,
+                                      MemoryScopeDevice);  // only for ASM?
+DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MAX(unsigned int,
+                                      MemoryOrderRelaxed,
+                                      MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MAX(unsigned long,
+                                      MemoryOrderRelaxed,
+                                      MemoryScopeDevice);
+// DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MAX(unsigned long long,
+//                                       MemoryOrderRelaxed,
+//                                       MemoryScopeDevice);
+
+#define DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MIN(TYPE, ORDER, SCOPE)                      \
+  inline TYPE atomic_fetch_min(TYPE* const dest, TYPE val, ORDER order, SCOPE scope) { \
+    return Impl::atomic_fetch_oper(                                                    \
+        Impl::MinOper<TYPE, const TYPE>(), dest, val, order, scope);                   \
+  }
+DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MIN(int, MemoryOrderRelaxed, MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MIN(long,
+                                      MemoryOrderRelaxed,
+                                      MemoryScopeDevice);  // only for ASM?
+DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MIN(unsigned int,
+                                      MemoryOrderRelaxed,
+                                      MemoryScopeDevice);
+DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MIN(unsigned long,
+                                      MemoryOrderRelaxed,
+                                      MemoryScopeDevice);
+// DESUL_IMPL_CUDA_HOST_ATOMIC_FETCH_MIN(unsigned long long,
+//                                       MemoryOrderRelaxed,
+//                                       MemoryScopeDevice);
+
+}  // namespace desul
+
+// Functions defined in the GCC overload set but not in the device overload set
+namespace desul {
+__device__ inline unsigned long long atomic_fetch_add(unsigned long long* const dest,
+                                                      unsigned long long val,
+                                                      MemoryOrderRelaxed order,
+                                                      MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::AddOper<unsigned long long, const unsigned long long>(),
+      dest,
+      val,
+      order,
+      scope);
+}
+__device__ inline long long atomic_fetch_add(long long* const dest,
+                                             long long val,
+                                             MemoryOrderRelaxed order,
+                                             MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::AddOper<long long, const long long>(), dest, val, order, scope);
+}
+__device__ inline long atomic_fetch_add(long* const dest,
+                                        long val,
+                                        MemoryOrderRelaxed order,
+                                        MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::AddOper<long, const long>(), dest, val, order, scope);
+}
+__device__ inline long long atomic_fetch_sub(long long* const dest,
+                                             long long val,
+                                             MemoryOrderRelaxed order,
+                                             MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::SubOper<long long, const long long>(), dest, val, order, scope);
+}
+__device__ inline long atomic_fetch_sub(long* const dest,
+                                        long val,
+                                        MemoryOrderRelaxed order,
+                                        MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::SubOper<long, const long>(), dest, val, order, scope);
+}
+__device__ inline long atomic_fetch_max(long* const dest,
+                                        long val,
+                                        MemoryOrderRelaxed order,
+                                        MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::MaxOper<long, const long>(), dest, val, order, scope);
+}
+__device__ inline long atomic_fetch_min(long* const dest,
+                                        long val,
+                                        MemoryOrderRelaxed order,
+                                        MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::MinOper<long, const long>(), dest, val, order, scope);
+}
+__device__ inline long atomic_fetch_or(long* const dest,
+                                       long val,
+                                       MemoryOrderRelaxed order,
+                                       MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::OrOper<long, const long>(), dest, val, order, scope);
+}
+__device__ inline long long atomic_fetch_or(long long* const dest,
+                                            long long val,
+                                            MemoryOrderRelaxed order,
+                                            MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::OrOper<long long, const long long>(), dest, val, order, scope);
+}
+__device__ inline long atomic_fetch_xor(long* const dest,
+                                        long val,
+                                        MemoryOrderRelaxed order,
+                                        MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::XorOper<long, const long>(), dest, val, order, scope);
+}
+__device__ inline long long atomic_fetch_xor(long long* const dest,
+                                             long long val,
+                                             MemoryOrderRelaxed order,
+                                             MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::XorOper<long long, const long long>(), dest, val, order, scope);
+}
+__device__ inline long atomic_fetch_and(long* const dest,
+                                        long val,
+                                        MemoryOrderRelaxed order,
+                                        MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::AndOper<long, const long>(), dest, val, order, scope);
+}
+__device__ inline long long atomic_fetch_and(long long* const dest,
+                                             long long val,
+                                             MemoryOrderRelaxed order,
+                                             MemoryScopeDevice scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::AndOper<long long, const long long>(), dest, val, order, scope);
+}
+
+__device__ inline unsigned long long atomic_add_fetch(unsigned long long* const dest,
+                                                      unsigned long long val,
+                                                      MemoryOrderRelaxed order,
+                                                      MemoryScopeDevice scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::AddOper<unsigned long long, const unsigned long long>(),
+      dest,
+      val,
+      order,
+      scope);
+}
+__device__ inline long long atomic_add_fetch(long long* const dest,
+                                             long long val,
+                                             MemoryOrderRelaxed order,
+                                             MemoryScopeDevice scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::AddOper<long long, const long long>(), dest, val, order, scope);
+}
+__device__ inline long atomic_add_fetch(long* const dest,
+                                        long val,
+                                        MemoryOrderRelaxed order,
+                                        MemoryScopeDevice scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::AddOper<long, const long>(), dest, val, order, scope);
+}
+__device__ inline long long atomic_sub_fetch(long long* const dest,
+                                             long long val,
+                                             MemoryOrderRelaxed order,
+                                             MemoryScopeDevice scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::SubOper<long long, const long long>(), dest, val, order, scope);
+}
+__device__ inline long atomic_sub_fetch(long* const dest,
+                                        long val,
+                                        MemoryOrderRelaxed order,
+                                        MemoryScopeDevice scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::SubOper<long, const long>(), dest, val, order, scope);
+}
+__device__ inline long long atomic_or_fetch(long long* const dest,
+                                            long long val,
+                                            MemoryOrderRelaxed order,
+                                            MemoryScopeDevice scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::OrOper<long long, const long long>(), dest, val, order, scope);
+}
+__device__ inline long atomic_or_fetch(long* const dest,
+                                       long val,
+                                       MemoryOrderRelaxed order,
+                                       MemoryScopeDevice scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::OrOper<long, const long>(), dest, val, order, scope);
+}
+__device__ inline long long atomic_xor_fetch(long long* const dest,
+                                             long long val,
+                                             MemoryOrderRelaxed order,
+                                             MemoryScopeDevice scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::XorOper<long long, const long long>(), dest, val, order, scope);
+}
+__device__ inline long atomic_xor_fetch(long* const dest,
+                                        long val,
+                                        MemoryOrderRelaxed order,
+                                        MemoryScopeDevice scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::XorOper<long, const long>(), dest, val, order, scope);
+}
+__device__ inline long long atomic_and_fetch(long long* const dest,
+                                             long val,
+                                             MemoryOrderRelaxed order,
+                                             MemoryScopeDevice scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::AndOper<long long, const long long>(), dest, val, order, scope);
+}
+__device__ inline long atomic_and_fetch(long* const dest,
+                                        long val,
+                                        MemoryOrderRelaxed order,
+                                        MemoryScopeDevice scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::AndOper<long, const long>(), dest, val, order, scope);
+}
+}  // namespace desul
+#endif
+
+#endif  // DESUL_HAVE_CUDA_ATOMICS
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Common.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Common.hpp
new file mode 100644 (file)
index 0000000..aef098e
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMMON_HPP_
+#define DESUL_ATOMICS_COMMON_HPP_
+#include <atomic>
+#include <cstdint>
+#include <type_traits>
+
+#include "desul/atomics/Macros.hpp"
+
+namespace desul {
+struct alignas(16) Dummy16ByteValue {
+  int64_t value1;
+  int64_t value2;
+  bool operator!=(Dummy16ByteValue v) const {
+    return (value1 != v.value1) || (value2 != v.value2);
+  }
+  bool operator==(Dummy16ByteValue v) const {
+    return (value1 == v.value1) && (value2 == v.value2);
+  }
+};
+}  // namespace desul
+
+// MemoryOrder Tags
+
+namespace desul {
+// Memory order sequential consistent
+struct MemoryOrderSeqCst {};
+// Memory order acquire release
+struct MemoryOrderAcqRel {};
+// Memory order acquire
+struct MemoryOrderAcquire {};
+// Memory order release
+struct MemoryOrderRelease {};
+// Memory order relaxed
+struct MemoryOrderRelaxed {};
+}  // namespace desul
+
+// Memory Scope Tags
+
+namespace desul {
+// Entire machine scope (e.g. for global arrays)
+struct MemoryScopeSystem {};
+// Node level
+struct MemoryScopeNode {};
+// Device or socket scope (i.e. a CPU socket, a single GPU)
+struct MemoryScopeDevice {};
+// Core scoped (i.e. a shared Level 1 cache)
+struct MemoryScopeCore {};
+// Caller scoped (i.e. NOT atomic!)
+struct MemoryScopeCaller {};
+}  // namespace desul
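+
+// The order and scope structs above are empty tag types used purely for
+// overload selection: a call site passes default-constructed instances.
+// Illustrative only, assuming the top-level desul atomics header is included:
+//
+//   int counter = 0;
+//   int old = desul::atomic_fetch_add(
+//       &counter, 1, desul::MemoryOrderRelaxed(), desul::MemoryScopeDevice());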
+
+#ifndef __ATOMIC_RELAXED
+#define __ATOMIC_RELAXED 0
+#define __ATOMIC_CONSUME 1
+#define __ATOMIC_ACQUIRE 2
+#define __ATOMIC_RELEASE 3
+#define __ATOMIC_ACQ_REL 4
+#define __ATOMIC_SEQ_CST 5
+#endif
+
+namespace desul {
+template <class MemoryOrderDesul>
+struct GCCMemoryOrder;
+
+template <>
+struct GCCMemoryOrder<MemoryOrderRelaxed> {
+  static constexpr int value = __ATOMIC_RELAXED;
+};
+
+template <>
+struct GCCMemoryOrder<MemoryOrderAcquire> {
+  static constexpr int value = __ATOMIC_ACQUIRE;
+};
+
+template <>
+struct GCCMemoryOrder<MemoryOrderRelease> {
+  static constexpr int value = __ATOMIC_RELEASE;
+};
+
+template <>
+struct GCCMemoryOrder<MemoryOrderAcqRel> {
+  static constexpr int value = __ATOMIC_ACQ_REL;
+};
+
+template <>
+struct GCCMemoryOrder<MemoryOrderSeqCst> {
+  static constexpr int value = __ATOMIC_SEQ_CST;
+};
+
+template <class MemoryOrderDesul>
+struct CXXMemoryOrder;
+
+template <>
+struct CXXMemoryOrder<MemoryOrderRelaxed> {
+  static constexpr std::memory_order value = std::memory_order_relaxed;
+};
+
+template <>
+struct CXXMemoryOrder<MemoryOrderAcquire> {
+  static constexpr std::memory_order value = std::memory_order_acquire;
+};
+
+template <>
+struct CXXMemoryOrder<MemoryOrderRelease> {
+  static constexpr std::memory_order value = std::memory_order_release;
+};
+
+template <>
+struct CXXMemoryOrder<MemoryOrderAcqRel> {
+  static constexpr std::memory_order value = std::memory_order_acq_rel;
+};
+
+template <>
+struct CXXMemoryOrder<MemoryOrderSeqCst> {
+  static constexpr std::memory_order value = std::memory_order_seq_cst;
+};
+
+namespace Impl {
+template <typename MemoryOrder>
+struct CmpExchFailureOrder {
+  using memory_order = std::conditional_t<
+      std::is_same<MemoryOrder, MemoryOrderAcqRel>{},
+      MemoryOrderAcquire,
+      std::conditional_t<std::is_same<MemoryOrder, MemoryOrderRelease>{},
+                         MemoryOrderRelaxed,
+                         MemoryOrder>>;
+};
+template <typename MemoryOrder>
+using cmpexch_failure_memory_order =
+    typename CmpExchFailureOrder<MemoryOrder>::memory_order;
+}  // namespace Impl
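+
+// Illustrative consequence of the mapping above: a failed compare-exchange
+// performs no store, so it cannot "release"; AcqRel therefore falls back to
+// Acquire on failure and Release falls back to Relaxed, e.g.
+//
+//   static_assert(
+//       std::is_same<desul::Impl::cmpexch_failure_memory_order<desul::MemoryOrderAcqRel>,
+//                    desul::MemoryOrderAcquire>::value,
+//       "AcqRel compare-exchange uses Acquire as its failure order");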
+
+}  // namespace desul
+
+// We should in principle use std::numeric_limits, but that requires constexpr
+// function support on device. Currently that is still considered experimental
+// on CUDA and is sometimes not reliable.
+namespace desul {
+namespace Impl {
+template <class T>
+struct numeric_limits_max;
+
+template <>
+struct numeric_limits_max<uint32_t> {
+  static constexpr uint32_t value = 0xffffffffu;
+};
+template <>
+struct numeric_limits_max<uint64_t> {
+  static constexpr uint64_t value = 0xffffffffffffffffllu;
+};
+
+constexpr bool atomic_always_lock_free(std::size_t size) {
+  return size == 4 || size == 8
+#if defined(DESUL_HAVE_16BYTE_COMPARE_AND_SWAP)
+         || size == 16
+#endif
+      ;
+}
+
+template <std::size_t Size, std::size_t Align>
+DESUL_INLINE_FUNCTION bool atomic_is_lock_free() noexcept {
+  return Size == 4 || Size == 8
+#if defined(DESUL_HAVE_16BYTE_COMPARE_AND_SWAP)
+         || Size == 16
+#endif
+      ;
+}
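+
+// Illustrative only: both helpers decide lock-freedom purely by size, e.g.
+//
+//   static_assert(desul::Impl::atomic_always_lock_free(sizeof(int64_t)), "");
+//
+// 16-byte operands additionally require DESUL_HAVE_16BYTE_COMPARE_AND_SWAP.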
+
+template <std::size_t N>
+struct atomic_compare_exchange_type;
+
+template <>
+struct atomic_compare_exchange_type<4> {
+  using type = int32_t;
+};
+
+template <>
+struct atomic_compare_exchange_type<8> {
+  using type = int64_t;
+};
+
+template <>
+struct atomic_compare_exchange_type<16> {
+  using type = Dummy16ByteValue;
+};
+
+template <class T>
+struct dont_deduce_this_parameter {
+  using type = T;
+};
+
+template <class T>
+using dont_deduce_this_parameter_t = typename dont_deduce_this_parameter<T>::type;
+
+}  // namespace Impl
+}  // namespace desul
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange.hpp
new file mode 100644 (file)
index 0000000..d947dac
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_HPP_
+
+#include "desul/atomics/Compare_Exchange_ScopeCaller.hpp"
+#include "desul/atomics/Macros.hpp"
+
+#ifdef DESUL_HAVE_GCC_ATOMICS
+#include "desul/atomics/Compare_Exchange_GCC.hpp"
+#endif
+#ifdef DESUL_HAVE_MSVC_ATOMICS
+#include "desul/atomics/Compare_Exchange_MSVC.hpp"
+#endif
+#ifdef DESUL_HAVE_SERIAL_ATOMICS
+#include "desul/atomics/Compare_Exchange_Serial.hpp"
+#endif
+#ifdef DESUL_HAVE_CUDA_ATOMICS
+#include "desul/atomics/Compare_Exchange_CUDA.hpp"
+#endif
+#ifdef DESUL_HAVE_HIP_ATOMICS
+#include "desul/atomics/Compare_Exchange_HIP.hpp"
+#endif
+#ifdef DESUL_HAVE_OPENMP_ATOMICS
+#include "desul/atomics/Compare_Exchange_OpenMP.hpp"
+#endif
+#ifdef DESUL_HAVE_SYCL_ATOMICS
+#include "desul/atomics/Compare_Exchange_SYCL.hpp"
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_CUDA.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_CUDA.hpp
new file mode 100644 (file)
index 0000000..310c59f
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_CUDA_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_CUDA_HPP_
+#include "desul/atomics/Common.hpp"
+#include "desul/atomics/Lock_Array_Cuda.hpp"
+
+#ifdef DESUL_HAVE_CUDA_ATOMICS
+namespace desul {
+// Only include if compiling device code, or the CUDA compiler is not NVCC (i.e. Clang)
+// atomic_thread_fence implementation
+#if defined(__CUDA_ARCH__) || !defined(__NVCC__)
+__device__ inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeDevice) {
+  __threadfence();
+}
+__device__ inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeDevice) {
+  __threadfence();
+}
+__device__ inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeDevice) {
+  __threadfence();
+}
+__device__ inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeDevice) {
+  __threadfence();
+}
+__device__ inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeCore) {
+  __threadfence_block();
+}
+__device__ inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCore) {
+  __threadfence_block();
+}
+__device__ inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeCore) {
+  __threadfence_block();
+}
+__device__ inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeCore) {
+  __threadfence_block();
+}
+#if (__CUDA_ARCH__ >= 600) || !defined(__NVCC__)
+__device__ inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeNode) {
+  __threadfence_system();
+}
+__device__ inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeNode) {
+  __threadfence_system();
+}
+__device__ inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeNode) {
+  __threadfence_system();
+}
+__device__ inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeNode) {
+  __threadfence_system();
+}
+#endif
+#endif
+}  // namespace desul
+
+// Compare-exchange for pre-Volta architectures. This path is not supported
+// with Clang as the CUDA compiler: we have no way to include the code for
+// Clang only when the compute capability is below 700, yet on Clang the
+// device-side symbol list must be independent of __CUDA_ARCH__.
+// FIXME temporary fix for https://github.com/kokkos/kokkos/issues/4390
+#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700) || \
+    (!defined(__NVCC__) && defined(DESUL_CUDA_ARCH_IS_PRE_VOLTA) && 0)
+namespace desul {
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderRelaxed, MemoryScope) {
+  static_assert(sizeof(unsigned int) == 4,
+                "this function assumes an unsigned int is 32-bit");
+  unsigned int return_val = atomicCAS(reinterpret_cast<unsigned int*>(dest),
+                                      reinterpret_cast<unsigned int&>(compare),
+                                      reinterpret_cast<unsigned int&>(value));
+  return reinterpret_cast<T&>(return_val);
+}
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 8, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderRelaxed, MemoryScope) {
+  static_assert(sizeof(unsigned long long int) == 8,
+                "this function assumes an unsigned long long is 64-bit");
+  unsigned long long int return_val =
+      atomicCAS(reinterpret_cast<unsigned long long int*>(dest),
+                reinterpret_cast<unsigned long long int&>(compare),
+                reinterpret_cast<unsigned long long int&>(value));
+  return reinterpret_cast<T&>(return_val);
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderRelease, MemoryScope) {
+  T return_val = atomic_compare_exchange(
+      dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return return_val;
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderAcquire, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_compare_exchange(
+      dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+  return return_val;
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderAcqRel, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_compare_exchange(
+      dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return return_val;
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4, T>::type atomic_exchange(
+    T* const dest, T value, MemoryOrderRelaxed, MemoryScope) {
+  static_assert(sizeof(unsigned int) == 4,
+                "this function assumes an unsigned int is 32-bit");
+  unsigned int return_val = atomicExch(reinterpret_cast<unsigned int*>(dest),
+                                       reinterpret_cast<unsigned int&>(value));
+  return reinterpret_cast<T&>(return_val);
+}
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 8, T>::type atomic_exchange(
+    T* const dest, T value, MemoryOrderRelaxed, MemoryScope) {
+  static_assert(sizeof(unsigned long long int) == 8,
+                "this function assumes an unsigned long long is 64-bit");
+  unsigned long long int return_val =
+      atomicExch(reinterpret_cast<unsigned long long int*>(dest),
+                 reinterpret_cast<unsigned long long int&>(value));
+  return reinterpret_cast<T&>(return_val);
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_exchange(T* const dest, T value, MemoryOrderRelease, MemoryScope) {
+  T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return reinterpret_cast<T&>(return_val);
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_exchange(T* const dest, T value, MemoryOrderAcquire, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+  return reinterpret_cast<T&>(return_val);
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_exchange(T* const dest, T value, MemoryOrderAcqRel, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return reinterpret_cast<T&>(return_val);
+}
+}  // namespace desul
+#endif
+
+// Include the CUDA PTX-based exchange atomics.
+// When building with Clang we need to include the device functions always,
+// since Clang must see a consistent overload set in both device and host
+// compilation; but that means we need to know on the host what to make
+// visible, i.e. we need host-side compile-time knowledge of the architecture.
+// DESUL proper simply does not support Clang as a CUDA compiler pre-Volta;
+// Kokkos has that knowledge and uses it here, which allows Kokkos to use
+// Clang with pre-Volta GPUs as the CUDA compiler.
+#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 700)) || \
+    (!defined(__NVCC__) && !defined(DESUL_CUDA_ARCH_IS_PRE_VOLTA))
+#include <desul/atomics/cuda/CUDA_asm_exchange.hpp>
+#endif
+
+// SeqCst is not directly supported by PTX; it needs the additional fences:
+
+#if defined(__CUDA_ARCH__) || !defined(__NVCC__)
+namespace desul {
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4, T>::type atomic_exchange(
+    T* const dest, T value, MemoryOrderSeqCst, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return return_val;
+}
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 8, T>::type atomic_exchange(
+    T* const dest, T value, MemoryOrderSeqCst, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return return_val;
+}
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderSeqCst, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_compare_exchange(
+      dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return return_val;
+}
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 8, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderSeqCst, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_compare_exchange(
+      dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return return_val;
+}
+}  // namespace desul
+#endif
+
+#if defined(__CUDA_ARCH__) || !defined(__NVCC__)
+namespace desul {
+template <typename T, class MemoryOrder, class MemoryScope>
+__device__ typename std::enable_if<(sizeof(T) != 8) && (sizeof(T) != 4), T>::type
+atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrder, MemoryScope scope) {
+  // Loop until every active lane has taken the per-address lock and applied
+  // its update; tracking completion via ballot avoids deadlock among
+  // divergent lanes of a warp or wavefront.
+  T return_val;
+  int done = 0;
+  unsigned int mask = DESUL_IMPL_ACTIVEMASK;
+  unsigned int active = DESUL_IMPL_BALLOT_MASK(mask, 1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_cuda((void*)dest, scope)) {
+        if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+          atomic_thread_fence(MemoryOrderRelease(), scope);
+        atomic_thread_fence(MemoryOrderAcquire(), scope);
+        return_val = *dest;
+        if (return_val == compare) {
+          *dest = value;
+          atomic_thread_fence(MemoryOrderRelease(), scope);
+        }
+        Impl::unlock_address_cuda((void*)dest, scope);
+        done = 1;
+      }
+    }
+    done_active = DESUL_IMPL_BALLOT_MASK(mask, done);
+  }
+  return return_val;
+}
+template <typename T, class MemoryOrder, class MemoryScope>
+__device__ typename std::enable_if<(sizeof(T) != 8) && (sizeof(T) != 4), T>::type
+atomic_exchange(T* const dest, T value, MemoryOrder, MemoryScope scope) {
+  // Loop until every active lane has taken the per-address lock and applied
+  // its update; tracking completion via ballot avoids deadlock among
+  // divergent lanes of a warp or wavefront.
+  T return_val;
+  int done = 0;
+  unsigned int mask = DESUL_IMPL_ACTIVEMASK;
+  unsigned int active = DESUL_IMPL_BALLOT_MASK(mask, 1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_cuda((void*)dest, scope)) {
+        if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+          atomic_thread_fence(MemoryOrderRelease(), scope);
+        atomic_thread_fence(MemoryOrderAcquire(), scope);
+        return_val = *dest;
+        *dest = value;
+        atomic_thread_fence(MemoryOrderRelease(), scope);
+        Impl::unlock_address_cuda((void*)dest, scope);
+        done = 1;
+      }
+    }
+    done_active = DESUL_IMPL_BALLOT_MASK(mask, done);
+  }
+  return return_val;
+}
+}  // namespace desul
+#endif
+
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_GCC.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_GCC.hpp
new file mode 100644 (file)
index 0000000..fad3c43
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_GCC_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_GCC_HPP_
+#include "desul/atomics/Common.hpp"
+
+#ifdef DESUL_HAVE_GCC_ATOMICS
+#if !defined(DESUL_HAVE_16BYTE_COMPARE_AND_SWAP) && !defined(__CUDACC__)
+// This doesn't work in WSL??
+//#define DESUL_HAVE_16BYTE_COMPARE_AND_SWAP
+#endif
+namespace desul {
+
+namespace Impl {
+template <class T>
+struct atomic_exchange_available_gcc {
+  constexpr static bool value =
+#ifndef DESUL_HAVE_LIBATOMIC
+      ((sizeof(T) == 4 && alignof(T) == 4) ||
+#ifdef DESUL_HAVE_16BYTE_COMPARE_AND_SWAP
+       (sizeof(T) == 16 && alignof(T) == 16) ||
+#endif
+       (sizeof(T) == 8 && alignof(T) == 8)) &&
+#endif
+      std::is_trivially_copyable<T>::value;
+};
+}  // namespace Impl
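+
+// Illustrative only, assuming DESUL_HAVE_LIBATOMIC is not defined: the trait
+// admits trivially copyable types whose size and alignment match a native
+// compare-and-swap width, e.g.
+//
+//   static_assert(desul::Impl::atomic_exchange_available_gcc<double>::value, "");
+//   struct Big { char c[24]; };  // hypothetical type, too wide for native CAS
+//   static_assert(!desul::Impl::atomic_exchange_available_gcc<Big>::value, "");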
+
+#if defined(__clang__) && (__clang_major__ >= 7) && !defined(__APPLE__)
+// clang-format off
+// Disable warning for large atomics on clang 7 and up (checked with godbolt)
+// error: large atomic operation may incur significant performance penalty [-Werror,-Watomic-alignment]
+// https://godbolt.org/z/G7YhqhbG6
+// clang-format on
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Watomic-alignment"
+#endif
+template <class MemoryOrder, class MemoryScope>
+void atomic_thread_fence(MemoryOrder, MemoryScope) {
+  __atomic_thread_fence(GCCMemoryOrder<MemoryOrder>::value);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<Impl::atomic_exchange_available_gcc<T>::value, T> atomic_exchange(
+    T* dest, T value, MemoryOrder, MemoryScope) {
+  T return_val;
+  __atomic_exchange(dest, &value, &return_val, GCCMemoryOrder<MemoryOrder>::value);
+  return return_val;
+}
+
+// The failure memory order for atomic_compare_exchange_n can be neither
+// RELEASE nor ACQ_REL, so those two orders are handled separately.
+template <typename T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<Impl::atomic_exchange_available_gcc<T>::value, T>
+atomic_compare_exchange(T* dest, T compare, T value, MemoryOrder, MemoryScope) {
+  (void)__atomic_compare_exchange(dest,
+                                  &compare,
+                                  &value,
+                                  false,
+                                  GCCMemoryOrder<MemoryOrder>::value,
+                                  GCCMemoryOrder<MemoryOrder>::value);
+  return compare;
+}
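+
+// Illustrative only: __atomic_compare_exchange writes the observed value back
+// into `compare` on failure and leaves it untouched on success, so returning
+// `compare` yields the old value of *dest in both cases:
+//
+//   int x = 0;
+//   int old = desul::atomic_compare_exchange(
+//       &x, 0, 42, desul::MemoryOrderRelaxed(), desul::MemoryScopeDevice());
+//   // old == 0 and x == 42; repeating the call returns 42 and leaves x alone.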
+
+template <typename T, class MemoryScope>
+std::enable_if_t<Impl::atomic_exchange_available_gcc<T>::value, T>
+atomic_compare_exchange(T* dest, T compare, T value, MemoryOrderRelease, MemoryScope) {
+  (void)__atomic_compare_exchange(
+      dest, &compare, &value, false, __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+  return compare;
+}
+
+template <typename T, class MemoryScope>
+std::enable_if_t<Impl::atomic_exchange_available_gcc<T>::value, T>
+atomic_compare_exchange(T* dest, T compare, T value, MemoryOrderAcqRel, MemoryScope) {
+  (void)__atomic_compare_exchange(
+      dest, &compare, &value, false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
+  return compare;
+}
+
+#if defined(__clang__) && (__clang_major__ >= 7) && !defined(__APPLE__)
+#pragma GCC diagnostic pop
+#endif
+}  // namespace desul
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_HIP.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_HIP.hpp
new file mode 100644 (file)
index 0000000..96739bc
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_HIP_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_HIP_HPP_
+#include "desul/atomics/Common.hpp"
+#include "desul/atomics/Lock_Array_HIP.hpp"
+
+#ifdef DESUL_HAVE_HIP_ATOMICS
+namespace desul {
+inline __device__ void atomic_thread_fence(MemoryOrderRelease, MemoryScopeDevice) {
+  __threadfence();
+}
+inline __device__ void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeDevice) {
+  __threadfence();
+}
+inline __device__ void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeDevice) {
+  __threadfence();
+}
+inline __device__ void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeDevice) {
+  __threadfence();
+}
+inline __device__ void atomic_thread_fence(MemoryOrderRelease, MemoryScopeCore) {
+  __threadfence_block();
+}
+inline __device__ void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCore) {
+  __threadfence_block();
+}
+inline __device__ void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeCore) {
+  __threadfence_block();
+}
+inline __device__ void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeCore) {
+  __threadfence_block();
+}
+inline __device__ void atomic_thread_fence(MemoryOrderRelease, MemoryScopeNode) {
+  __threadfence_system();
+}
+inline __device__ void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeNode) {
+  __threadfence_system();
+}
+inline __device__ void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeNode) {
+  __threadfence_system();
+}
+inline __device__ void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeNode) {
+  __threadfence_system();
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderRelaxed, MemoryScope) {
+  static_assert(sizeof(unsigned int) == 4,
+                "this function assumes an unsigned int is 32-bit");
+  unsigned int return_val = atomicCAS(reinterpret_cast<unsigned int*>(dest),
+                                      reinterpret_cast<unsigned int&>(compare),
+                                      reinterpret_cast<unsigned int&>(value));
+  return reinterpret_cast<T&>(return_val);
+}
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 8, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderRelaxed, MemoryScope) {
+  static_assert(sizeof(unsigned long long int) == 8,
+                "this function assumes an unsigned long long is 64-bit");
+  unsigned long long int return_val =
+      atomicCAS(reinterpret_cast<unsigned long long int*>(dest),
+                reinterpret_cast<unsigned long long int&>(compare),
+                reinterpret_cast<unsigned long long int&>(value));
+  return reinterpret_cast<T&>(return_val);
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderRelease, MemoryScope) {
+  T return_val = atomic_compare_exchange(
+      dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return return_val;
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderAcquire, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_compare_exchange(
+      dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+  return return_val;
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderAcqRel, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_compare_exchange(
+      dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return return_val;
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4, T>::type atomic_exchange(
+    T* const dest, T value, MemoryOrderRelaxed, MemoryScope) {
+  static_assert(sizeof(unsigned int) == 4,
+                "this function assumes an unsigned int is 32-bit");
+  unsigned int return_val = atomicExch(reinterpret_cast<unsigned int*>(dest),
+                                       reinterpret_cast<unsigned int&>(value));
+  return reinterpret_cast<T&>(return_val);
+}
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 8, T>::type atomic_exchange(
+    T* const dest, T value, MemoryOrderRelaxed, MemoryScope) {
+  static_assert(sizeof(unsigned long long int) == 8,
+                "this function assumes an unsigned long long is 64-bit");
+  unsigned long long int return_val =
+      atomicExch(reinterpret_cast<unsigned long long int*>(dest),
+                 reinterpret_cast<unsigned long long int&>(value));
+  return reinterpret_cast<T&>(return_val);
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_exchange(T* const dest, T value, MemoryOrderRelease, MemoryScope) {
+  T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return reinterpret_cast<T&>(return_val);
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_exchange(T* const dest, T value, MemoryOrderAcquire, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+  return reinterpret_cast<T&>(return_val);
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_exchange(T* const dest, T value, MemoryOrderAcqRel, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return reinterpret_cast<T&>(return_val);
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_exchange(T* const dest, T value, MemoryOrderSeqCst, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_exchange(dest, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return reinterpret_cast<T&>(return_val);
+}
+
+template <typename T, class MemoryScope>
+__device__ typename std::enable_if<sizeof(T) == 4 || sizeof(T) == 8, T>::type
+atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderSeqCst, MemoryScope) {
+  atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T return_val = atomic_compare_exchange(
+      dest, compare, value, MemoryOrderRelaxed(), MemoryScope());
+  atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return return_val;
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+__device__ typename std::enable_if<(sizeof(T) != 8) && (sizeof(T) != 4), T>::type
+atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrder, MemoryScope scope) {
+  // Loop until every active lane has taken the per-address lock and applied
+  // its update; tracking completion via ballot avoids deadlock among
+  // divergent lanes of a warp or wavefront.
+  T return_val;
+  int done = 0;
+  unsigned long long int active = DESUL_IMPL_BALLOT_MASK(1);
+  unsigned long long int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_hip((void*)dest, scope)) {
+        if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+          atomic_thread_fence(MemoryOrderRelease(), scope);
+        atomic_thread_fence(MemoryOrderAcquire(), scope);
+        return_val = *dest;
+        if (return_val == compare) {
+          *dest = value;
+          atomic_thread_fence(MemoryOrderRelease(), scope);
+        }
+        Impl::unlock_address_hip((void*)dest, scope);
+        done = 1;
+      }
+    }
+    done_active = DESUL_IMPL_BALLOT_MASK(done);
+  }
+  return return_val;
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+__device__ typename std::enable_if<(sizeof(T) != 8) && (sizeof(T) != 4), T>::type
+atomic_exchange(T* const dest, T value, MemoryOrder, MemoryScope scope) {
+  // Loop until every active lane has taken the per-address lock and applied
+  // its update; tracking completion via ballot avoids deadlock among
+  // divergent lanes of a warp or wavefront.
+  T return_val;
+  int done = 0;
+  unsigned long long int active = DESUL_IMPL_BALLOT_MASK(1);
+  unsigned long long int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_hip((void*)dest, scope)) {
+        if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+          atomic_thread_fence(MemoryOrderRelease(), scope);
+        atomic_thread_fence(MemoryOrderAcquire(), scope);
+        return_val = *dest;
+        *dest = value;
+        atomic_thread_fence(MemoryOrderRelease(), scope);
+        Impl::unlock_address_hip((void*)dest, scope);
+        done = 1;
+      }
+    }
+    done_active = DESUL_IMPL_BALLOT_MASK(done);
+  }
+  return return_val;
+}
+}  // namespace desul
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_MSVC.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_MSVC.hpp
new file mode 100644 (file)
index 0000000..edf72f1
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_MSVC_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_MSVC_HPP_
+#include <type_traits>
+
+#include "desul/atomics/Common.hpp"
+#ifdef DESUL_HAVE_MSVC_ATOMICS
+
+#ifndef DESUL_HAVE_16BYTE_COMPARE_AND_SWAP
+#define DESUL_HAVE_16BYTE_COMPARE_AND_SWAP
+#endif
+
+namespace desul {
+
+// Forward-declare these functions. They themselves use compare_exchange, so
+// the header that defines them is included after this file.
+namespace Impl {
+template <typename MemoryScope>
+inline bool lock_address(void* ptr, MemoryScope ms);
+
+template <typename MemoryScope>
+void unlock_address(void* ptr, MemoryScope ms);
+}  // namespace Impl
+
+template <class MemoryOrder, class MemoryScope>
+void atomic_thread_fence(MemoryOrder, MemoryScope) {
+  std::atomic_thread_fence(CXXMemoryOrder<MemoryOrder>::value);
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 1, T>::type atomic_exchange(T* const dest,
+                                                                 T val,
+                                                                 MemoryOrderRelaxed,
+                                                                 MemoryScope) {
+  char return_val = _InterlockedExchange8((char*)dest, *((char*)&val));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 2, T>::type atomic_exchange(T* const dest,
+                                                                 T val,
+                                                                 MemoryOrderRelaxed,
+                                                                 MemoryScope) {
+  short return_val = _InterlockedExchange16((short*)dest, *((short*)&val));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 4, T>::type atomic_exchange(T* const dest,
+                                                                 T val,
+                                                                 MemoryOrderRelaxed,
+                                                                 MemoryScope) {
+  long return_val = _InterlockedExchange((long*)dest, *((long*)&val));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 8, T>::type atomic_exchange(T* const dest,
+                                                                 T val,
+                                                                 MemoryOrderRelaxed,
+                                                                 MemoryScope) {
+  __int64 return_val = _InterlockedExchange64((__int64*)dest, *((__int64*)&val));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 1, T>::type atomic_exchange(T* const dest,
+                                                                 T val,
+                                                                 MemoryOrderSeqCst,
+                                                                 MemoryScope) {
+  char return_val = _InterlockedExchange8((char*)dest, *((char*)&val));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 2, T>::type atomic_exchange(T* const dest,
+                                                                 T val,
+                                                                 MemoryOrderSeqCst,
+                                                                 MemoryScope) {
+  short return_val = _InterlockedExchange16((short*)dest, *((short*)&val));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 4, T>::type atomic_exchange(T* const dest,
+                                                                 T val,
+                                                                 MemoryOrderSeqCst,
+                                                                 MemoryScope) {
+  long return_val = _InterlockedExchange((long*)dest, *((long*)&val));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 8, T>::type atomic_exchange(T* const dest,
+                                                                 T val,
+                                                                 MemoryOrderSeqCst,
+                                                                 MemoryScope) {
+  __int64 return_val = _InterlockedExchange64((__int64*)dest, *((__int64*)&val));
+  return *(reinterpret_cast<T*>(&return_val));
+}
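+
+// Note that the relaxed and seq_cst overloads above share the same intrinsics:
+// on x86/x64 the plain _Interlocked* functions already act as full barriers.
+// A minimal usage sketch (hypothetical caller, not part of this header):
+//
+//   long value = 42;
+//   // atomically stores 7 and returns the previous value (42)
+//   long old = desul::atomic_exchange(
+//       &value, 7L, desul::MemoryOrderSeqCst(), desul::MemoryScopeDevice());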
+
+template <typename T, class MemoryOrder, class MemoryScope>
+typename std::enable_if<(sizeof(T) != 1 && sizeof(T) != 2 && sizeof(T) != 4 &&
+                         sizeof(T) != 8),
+                        T>::type
+atomic_exchange(T* const dest, T val, MemoryOrder, MemoryScope scope) {
+  while (!Impl::lock_address((void*)dest, scope)) {
+  }
+  if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+    atomic_thread_fence(MemoryOrderRelease(), scope);
+  atomic_thread_fence(MemoryOrderAcquire(), scope);
+  T return_val = *dest;
+  *dest = val;
+  atomic_thread_fence(MemoryOrderRelease(), scope);
+
+  Impl::unlock_address((void*)dest, scope);
+  return return_val;
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 1, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T val, MemoryOrderRelaxed, MemoryScope) {
+  char return_val =
+      _InterlockedCompareExchange8((char*)dest, *((char*)&val), *((char*)&compare));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 2, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T val, MemoryOrderRelaxed, MemoryScope) {
+  short return_val =
+      _InterlockedCompareExchange16((short*)dest, *((short*)&val), *((short*)&compare));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 4, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T val, MemoryOrderRelaxed, MemoryScope) {
+  long return_val =
+      _InterlockedCompareExchange((long*)dest, *((long*)&val), *((long*)&compare));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 8, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T val, MemoryOrderRelaxed, MemoryScope) {
+  __int64 return_val = _InterlockedCompareExchange64(
+      (__int64*)dest, *((__int64*)&val), *((__int64*)&compare));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 16, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T val, MemoryOrderRelaxed, MemoryScope) {
+  Dummy16ByteValue* val16 = reinterpret_cast<Dummy16ByteValue*>(&val);
+  (void)_InterlockedCompareExchange128(reinterpret_cast<__int64*>(dest),
+                                       val16->value2,
+                                       val16->value1,
+                                       (reinterpret_cast<__int64*>(&compare)));
+  return compare;
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 1, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T val, MemoryOrderSeqCst, MemoryScope) {
+  char return_val =
+      _InterlockedCompareExchange8((char*)dest, *((char*)&val), *((char*)&compare));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 2, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T val, MemoryOrderSeqCst, MemoryScope) {
+  short return_val =
+      _InterlockedCompareExchange16((short*)dest, *((short*)&val), *((short*)&compare));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 4, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T val, MemoryOrderSeqCst, MemoryScope) {
+  long return_val =
+      _InterlockedCompareExchange((long*)dest, *((long*)&val), *((long*)&compare));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 8, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T val, MemoryOrderSeqCst, MemoryScope) {
+  __int64 return_val = _InterlockedCompareExchange64(
+      (__int64*)dest, *((__int64*)&val), *((__int64*)&compare));
+  return *(reinterpret_cast<T*>(&return_val));
+}
+
+template <typename T, class MemoryScope>
+typename std::enable_if<sizeof(T) == 16, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T val, MemoryOrderSeqCst, MemoryScope) {
+  Dummy16ByteValue* val16 = reinterpret_cast<Dummy16ByteValue*>(&val);
+  (void)_InterlockedCompareExchange128(reinterpret_cast<__int64*>(dest),
+                                       val16->value2,
+                                       val16->value1,
+                                       (reinterpret_cast<__int64*>(&compare)));
+  return compare;
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+typename std::enable_if<(sizeof(T) != 1 && sizeof(T) != 2 && sizeof(T) != 4 &&
+                         sizeof(T) != 8 && sizeof(T) != 16),
+                        T>::type
+atomic_compare_exchange(
+    T* const dest, T compare, T val, MemoryOrder, MemoryScope scope) {
+  while (!Impl::lock_address((void*)dest, scope)) {
+  }
+  if (std::is_same<MemoryOrder, MemoryOrderSeqCst>::value)
+    atomic_thread_fence(MemoryOrderRelease(), scope);
+  atomic_thread_fence(MemoryOrderAcquire(), scope);
+  T return_val = *dest;
+  if (return_val == compare) {
+    *dest = val;
+    atomic_thread_fence(MemoryOrderRelease(), scope);
+  }
+
+  Impl::unlock_address((void*)dest, scope);
+  return return_val;
+}
+
+}  // namespace desul
+
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_OpenMP.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_OpenMP.hpp
new file mode 100644 (file)
index 0000000..dfea81a
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_OPENMP_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_OPENMP_HPP_
+#include <omp.h>
+
+#include "desul/atomics/Common.hpp"
+
+#ifdef DESUL_HAVE_OPENMP_ATOMICS
+namespace desul {
+
+#if _OPENMP > 201800
+// atomic_thread_fence for Core Scope
+inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeCore) {
+// There is no seq_cst flush in OpenMP; an acq_rel flush should be equivalent for a fence.
+#pragma omp flush acq_rel
+}
+inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeCore) {
+#pragma omp flush acq_rel
+}
+inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeCore) {
+#pragma omp flush release
+}
+inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCore) {
+#pragma omp flush acquire
+}
+// atomic_thread_fence for Device Scope
+inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeDevice) {
+// There is no seq_cst flush in OpenMP; an acq_rel flush should be equivalent for a fence.
+#pragma omp flush acq_rel
+}
+inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeDevice) {
+#pragma omp flush acq_rel
+}
+inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeDevice) {
+#pragma omp flush release
+}
+inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeDevice) {
+#pragma omp flush acquire
+}
+#else
+// atomic_thread_fence for Core Scope
+inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeCore) {
+#pragma omp flush
+}
+inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeCore) {
+#pragma omp flush
+}
+inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeCore) {
+#pragma omp flush
+}
+inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeCore) {
+#pragma omp flush
+}
+// atomic_thread_fence for Device Scope
+inline void atomic_thread_fence(MemoryOrderSeqCst, MemoryScopeDevice) {
+#pragma omp flush
+}
+inline void atomic_thread_fence(MemoryOrderAcqRel, MemoryScopeDevice) {
+#pragma omp flush
+}
+inline void atomic_thread_fence(MemoryOrderRelease, MemoryScopeDevice) {
+#pragma omp flush
+}
+inline void atomic_thread_fence(MemoryOrderAcquire, MemoryScopeDevice) {
+#pragma omp flush
+}
+#endif
+
+template <typename T, class MemoryOrder, class MemoryScope>
+T atomic_exchange(T* dest, T value, MemoryOrder, MemoryScope) {
+  T return_val;
+  if (!std::is_same<MemoryOrder, MemoryOrderRelaxed>::value)
+    atomic_thread_fence(MemoryOrderAcquire(), MemoryScope());
+  T& x = *dest;
+#pragma omp atomic capture
+  {
+    return_val = x;
+    x = value;
+  }
+  if (!std::is_same<MemoryOrder, MemoryOrderRelaxed>::value)
+    atomic_thread_fence(MemoryOrderRelease(), MemoryScope());
+  return return_val;
+}
+
+// OpenMP has no compare-exchange construct, so we use compiler built-ins and
+// rely on testing that this works. Note that this means we test it in
+// OpenMPTarget offload regions!
+template <typename T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<Impl::atomic_always_lock_free(sizeof(T)), T> atomic_compare_exchange(
+    T* dest, T compare, T value, MemoryOrder, MemoryScope) {
+  using cas_t = typename Impl::atomic_compare_exchange_type<sizeof(T)>::type;
+  cas_t retval = __sync_val_compare_and_swap(reinterpret_cast<volatile cas_t*>(dest),
+                                             reinterpret_cast<cas_t&>(compare),
+                                             reinterpret_cast<cas_t&>(value));
+  return reinterpret_cast<T&>(retval);
+}
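+
+// A minimal sketch of the observable behavior for a lock-free type
+// (hypothetical caller, not part of this header):
+//
+//   unsigned int x = 0u;
+//   // stores 1u only if x == 0u; returns the value observed beforehand
+//   unsigned int old = desul::atomic_compare_exchange(
+//       &x, 0u, 1u, desul::MemoryOrderRelaxed(), desul::MemoryScopeDevice());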
+
+#if defined(__clang__) && (__clang_major__ >= 7)
+// Disable warning for large atomics on clang 7 and up (checked with godbolt)
+// clang-format off
+// error: large atomic operation may incur significant performance penalty [-Werror,-Watomic-alignment]
+// clang-format on
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Watomic-alignment"
+#endif
+
+// Make 16-byte CAS work at least on the host
+#pragma omp begin declare variant match(device = {kind(host)})
+template <typename T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<!Impl::atomic_always_lock_free(sizeof(T)) && (sizeof(T) == 16), T>
+atomic_compare_exchange(T* dest, T compare, T value, MemoryOrder, MemoryScope) {
+  (void)__atomic_compare_exchange(dest,
+                                  &compare,
+                                  &value,
+                                  false,
+                                  GCCMemoryOrder<MemoryOrder>::value,
+                                  GCCMemoryOrder<MemoryOrder>::value);
+  return compare;
+}
+#pragma omp end declare variant
+
+#pragma omp begin declare variant match(device = {kind(nohost)})
+template <typename T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<!Impl::atomic_always_lock_free(sizeof(T)) && (sizeof(T) == 16), T>
+atomic_compare_exchange(T* /*dest*/, T /*compare*/, T value, MemoryOrder, MemoryScope) {
+  // FIXME make sure this never gets called
+  return value;
+}
+#pragma omp end declare variant
+
+#if defined(__clang__) && (__clang_major__ >= 7)
+#pragma GCC diagnostic pop
+#endif
+
+}  // namespace desul
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_SYCL.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_SYCL.hpp
new file mode 100644 (file)
index 0000000..6c8c685
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_SYCL_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_SYCL_HPP_
+
+// clang-format off
+#include "desul/atomics/SYCLConversions.hpp"
+#include "desul/atomics/Common.hpp"
+
+#include <CL/sycl.hpp>
+// clang-format on
+
+#ifdef DESUL_HAVE_SYCL_ATOMICS
+
+namespace desul {
+
+template <class MemoryOrder, class MemoryScope>
+inline void atomic_thread_fence(MemoryOrder, MemoryScope) {
+  sycl::atomic_fence(
+      Impl::DesulToSYCLMemoryOrder<MemoryOrder, /*extended namespace*/ false>::value,
+      Impl::DesulToSYCLMemoryScope<MemoryScope, /*extended namespace*/ false>::value);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+typename std::enable_if<sizeof(T) == 4, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrder, MemoryScope) {
+  static_assert(sizeof(unsigned int) == 4,
+                "this function assumes an unsigned int is 32-bit");
+  Impl::sycl_atomic_ref<unsigned int, MemoryOrder, MemoryScope> dest_ref(
+      *reinterpret_cast<unsigned int*>(dest));
+  dest_ref.compare_exchange_strong(*reinterpret_cast<unsigned int*>(&compare),
+                                   *reinterpret_cast<unsigned int*>(&value));
+  return compare;
+}
+template <typename T, class MemoryOrder, class MemoryScope>
+typename std::enable_if<sizeof(T) == 8, T>::type atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrder, MemoryScope) {
+  static_assert(sizeof(unsigned long long int) == 8,
+                "this function assumes an unsigned long long is 64-bit");
+  Impl::sycl_atomic_ref<unsigned long long int, MemoryOrder, MemoryScope> dest_ref(
+      *reinterpret_cast<unsigned long long int*>(dest));
+  dest_ref.compare_exchange_strong(*reinterpret_cast<unsigned long long int*>(&compare),
+                                   *reinterpret_cast<unsigned long long int*>(&value));
+  return compare;
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+typename std::enable_if<sizeof(T) == 4, T>::type atomic_exchange(T* const dest,
+                                                                 T value,
+                                                                 MemoryOrder,
+                                                                 MemoryScope) {
+  static_assert(sizeof(unsigned int) == 4,
+                "this function assumes an unsigned int is 32-bit");
+  Impl::sycl_atomic_ref<unsigned int, MemoryOrder, MemoryScope> dest_ref(
+      *reinterpret_cast<unsigned int*>(dest));
+  unsigned int return_val = dest_ref.exchange(*reinterpret_cast<unsigned int*>(&value));
+  return reinterpret_cast<T&>(return_val);
+}
+template <typename T, class MemoryOrder, class MemoryScope>
+typename std::enable_if<sizeof(T) == 8, T>::type atomic_exchange(T* const dest,
+                                                                 T value,
+                                                                 MemoryOrder,
+                                                                 MemoryScope) {
+  static_assert(sizeof(unsigned long long int) == 8,
+                "this function assumes an unsigned long long is 64-bit");
+  Impl::sycl_atomic_ref<unsigned long long int, MemoryOrder, MemoryScope> dest_ref(
+      *reinterpret_cast<unsigned long long int*>(dest));
+  unsigned long long int return_val =
+      dest_ref.exchange(reinterpret_cast<unsigned long long int&>(value));
+  return reinterpret_cast<T&>(return_val);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+typename std::enable_if<(sizeof(T) != 8) && (sizeof(T) != 4), T>::type
+atomic_compare_exchange(
+    T* const /*dest*/, T compare, T /*value*/, MemoryOrder, MemoryScope) {
+  // FIXME_SYCL not implemented
+  assert(false);
+  return compare;
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+typename std::enable_if<(sizeof(T) != 8) && (sizeof(T) != 4), T>::type atomic_exchange(
+    T* const /*dest*/, T value, MemoryOrder, MemoryScope) {
+  // FIXME_SYCL not implemented
+  assert(false);
+  return value;
+}
+
+}  // namespace desul
+
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_ScopeCaller.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_ScopeCaller.hpp
new file mode 100644 (file)
index 0000000..fff6320
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_SCOPECALLER_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_SCOPECALLER_HPP_
+#include "desul/atomics/Common.hpp"
+
+namespace desul {
+
+template <class MemoryOrder>
+DESUL_INLINE_FUNCTION void atomic_thread_fence(MemoryOrder, MemoryScopeCaller) {}
+
+#define DESUL_ATOMIC_EXCHANGE_SCOPECALLER(MEMORY_ORDER)               \
+  template <typename T>                                               \
+  DESUL_INLINE_FUNCTION T atomic_exchange(                            \
+      T* dest, T value, MEMORY_ORDER, MemoryScopeCaller) {            \
+    T return_val = *dest;                                             \
+    *dest = value;                                                    \
+    return return_val;                                                \
+  }                                                                   \
+                                                                      \
+  template <typename T>                                               \
+  DESUL_INLINE_FUNCTION T atomic_compare_exchange(                    \
+      T* dest, T compare, T value, MEMORY_ORDER, MemoryScopeCaller) { \
+    T current_val = *dest;                                            \
+    if (current_val == compare) *dest = value;                        \
+    return current_val;                                               \
+  }
+
+DESUL_ATOMIC_EXCHANGE_SCOPECALLER(MemoryOrderSeqCst)
+DESUL_ATOMIC_EXCHANGE_SCOPECALLER(MemoryOrderAcqRel)
+DESUL_ATOMIC_EXCHANGE_SCOPECALLER(MemoryOrderRelease)
+DESUL_ATOMIC_EXCHANGE_SCOPECALLER(MemoryOrderAcquire)
+DESUL_ATOMIC_EXCHANGE_SCOPECALLER(MemoryOrderRelaxed)
+
+#undef DESUL_ATOMIC_EXCHANGE_SCOPECALLER
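+
+// With MemoryScopeCaller the "atomics" above intentionally degenerate into
+// plain loads and stores, because the caller guarantees exclusive access.
+// For example, the relaxed instantiation expands to (sketch):
+//
+//   template <typename T>
+//   T atomic_exchange(T* dest, T value, MemoryOrderRelaxed, MemoryScopeCaller) {
+//     T return_val = *dest;  // plain read, no synchronization required
+//     *dest = value;         // plain write
+//     return return_val;
+//   }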
+}  // namespace desul
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_Serial.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Compare_Exchange_Serial.hpp
new file mode 100644 (file)
index 0000000..9d0db5c
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+#ifndef DESUL_ATOMICS_COMPARE_EXCHANGE_SERIAL_HPP_
+#define DESUL_ATOMICS_COMPARE_EXCHANGE_SERIAL_HPP_
+
+#ifdef DESUL_HAVE_SERIAL_ATOMICS
+namespace desul {
+template <class MemoryScope>
+void atomic_thread_fence(MemoryOrderAcquire, MemoryScope) {}
+
+template <class MemoryScope>
+void atomic_thread_fence(MemoryOrderRelease, MemoryScope) {}
+
+template <typename T, class MemoryScope>
+T atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderRelaxed, MemoryScope) {
+  T old = *dest;
+  if (old == compare) {
+    *dest = value;
+  }
+  // Return the value observed before the operation so callers can detect
+  // whether the exchange actually happened.
+  return old;
+}
+template <typename T, class MemoryScope>
+T atomic_compare_exchange(
+    T* const dest, T compare, T value, MemoryOrderSeqCst, MemoryScope) {
+  T old = *dest;
+  if (old == compare) {
+    *dest = value;
+  }
+  return old;
+}
+}  // namespace desul
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/GCC.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/GCC.hpp
new file mode 100644 (file)
index 0000000..239c84f
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+#ifndef DESUL_ATOMICS_GCC_HPP_
+#define DESUL_ATOMICS_GCC_HPP_
+
+#ifdef DESUL_HAVE_GCC_ATOMICS
+
+#include <type_traits>
+/*
+Built-in Function: type __atomic_add_fetch(type *ptr, type val, int memorder)
+Built-in Function: type __atomic_sub_fetch(type *ptr, type val, int memorder)
+Built-in Function: type __atomic_and_fetch(type *ptr, type val, int memorder)
+Built-in Function: type __atomic_xor_fetch(type *ptr, type val, int memorder)
+Built-in Function: type __atomic_or_fetch(type *ptr, type val, int memorder)
+Built-in Function: type __atomic_nand_fetch(type *ptr, type val, int memorder)
+*/
+
+#define DESUL_GCC_INTEGRAL_OP_ATOMICS(MEMORY_ORDER, MEMORY_SCOPE)                 \
+  template <typename T>                                                           \
+  typename std::enable_if<std::is_integral<T>::value, T>::type atomic_fetch_add(  \
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       \
+    return __atomic_fetch_add(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value);  \
+  }                                                                               \
+  template <typename T>                                                           \
+  typename std::enable_if<std::is_integral<T>::value, T>::type atomic_fetch_sub(  \
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       \
+    return __atomic_fetch_sub(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value);  \
+  }                                                                               \
+  template <typename T>                                                           \
+  typename std::enable_if<std::is_integral<T>::value, T>::type atomic_fetch_and(  \
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       \
+    return __atomic_fetch_and(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value);  \
+  }                                                                               \
+  template <typename T>                                                           \
+  typename std::enable_if<std::is_integral<T>::value, T>::type atomic_fetch_or(   \
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       \
+    return __atomic_fetch_or(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value);   \
+  }                                                                               \
+  template <typename T>                                                           \
+  typename std::enable_if<std::is_integral<T>::value, T>::type atomic_fetch_xor(  \
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       \
+    return __atomic_fetch_xor(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value);  \
+  }                                                                               \
+  template <typename T>                                                           \
+  typename std::enable_if<std::is_integral<T>::value, T>::type atomic_fetch_nand( \
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       \
+    return __atomic_fetch_nand(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
+  }                                                                               \
+  template <typename T>                                                           \
+  typename std::enable_if<std::is_integral<T>::value, T>::type atomic_add_fetch(  \
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       \
+    return __atomic_add_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value);  \
+  }                                                                               \
+  template <typename T>                                                           \
+  typename std::enable_if<std::is_integral<T>::value, T>::type atomic_sub_fetch(  \
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       \
+    return __atomic_sub_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value);  \
+  }                                                                               \
+  template <typename T>                                                           \
+  typename std::enable_if<std::is_integral<T>::value, T>::type atomic_and_fetch(  \
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       \
+    return __atomic_and_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value);  \
+  }                                                                               \
+  template <typename T>                                                           \
+  typename std::enable_if<std::is_integral<T>::value, T>::type atomic_or_fetch(   \
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       \
+    return __atomic_or_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value);   \
+  }                                                                               \
+  template <typename T>                                                           \
+  typename std::enable_if<std::is_integral<T>::value, T>::type atomic_xor_fetch(  \
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       \
+    return __atomic_xor_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value);  \
+  }                                                                               \
+  template <typename T>                                                           \
+  typename std::enable_if<std::is_integral<T>::value, T>::type atomic_nand_fetch( \
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       \
+    return __atomic_nand_fetch(dest, value, GCCMemoryOrder<MEMORY_ORDER>::value); \
+  }
+
+namespace desul {
+DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderRelaxed, MemoryScopeNode)
+DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderRelaxed, MemoryScopeDevice)
+DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderRelaxed, MemoryScopeCore)
+DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderSeqCst, MemoryScopeNode)
+DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderSeqCst, MemoryScopeDevice)
+DESUL_GCC_INTEGRAL_OP_ATOMICS(MemoryOrderSeqCst, MemoryScopeCore)
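+
+// Each instantiation above stamps out the integral fetch-op and op-fetch
+// functions for one (memory order, memory scope) pair. A minimal usage sketch
+// (hypothetical caller, not part of this header):
+//
+//   int counter = 0;
+//   // atomically adds 1 and returns the value before the addition
+//   int old = desul::atomic_fetch_add(
+//       &counter, 1, desul::MemoryOrderRelaxed(), desul::MemoryScopeDevice());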
+
+template <typename T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<!Impl::atomic_exchange_available_gcc<T>::value, T> atomic_exchange(
+    T* const dest,
+    Impl::dont_deduce_this_parameter_t<const T> val,
+    MemoryOrder /*order*/,
+    MemoryScope scope) {
+  // Acquire a lock for the address
+  // clang-format off
+  while (!Impl::lock_address((void*)dest, scope)) {}
+  // clang-format on
+
+  atomic_thread_fence(MemoryOrderAcquire(), scope);
+  T return_val = *dest;
+  *dest = val;
+  atomic_thread_fence(MemoryOrderRelease(), scope);
+  Impl::unlock_address((void*)dest, scope);
+  return return_val;
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+std::enable_if_t<!Impl::atomic_exchange_available_gcc<T>::value, T>
+atomic_compare_exchange(T* const dest,
+                        Impl::dont_deduce_this_parameter_t<const T> compare,
+                        Impl::dont_deduce_this_parameter_t<const T> val,
+                        MemoryOrder /*order*/,
+                        MemoryScope scope) {
+  // Acquire a lock for the address
+  // clang-format off
+  while (!Impl::lock_address((void*)dest, scope)) {}
+  // clang-format on
+
+  atomic_thread_fence(MemoryOrderAcquire(), scope);
+  T return_val = *dest;
+  if (return_val == compare) {
+    *dest = val;
+    atomic_thread_fence(MemoryOrderRelease(), scope);
+  }
+  Impl::unlock_address((void*)dest, scope);
+  return return_val;
+}
+}  // namespace desul
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Generic.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Generic.hpp
new file mode 100644 (file)
index 0000000..1fffd3b
--- /dev/null
@@ -0,0 +1,766 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_GENERIC_HPP_
+#define DESUL_ATOMICS_GENERIC_HPP_
+
+#include <type_traits>
+#if defined(__GNUC__) && (!defined(__clang__))
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#endif
+#include "desul/atomics/Common.hpp"
+#include "desul/atomics/Compare_Exchange.hpp"
+#include "desul/atomics/Lock_Array.hpp"
+#include "desul/atomics/Macros.hpp"
+// Combination operands to be used in a compare-and-exchange-based atomic
+// operation
+namespace desul {
+namespace Impl {
+
+template <class Scalar1, class Scalar2>
+struct MaxOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return (val1 > val2 ? val1 : val2);
+  }
+  DESUL_FORCEINLINE_FUNCTION
+  static constexpr bool check_early_exit(Scalar1 const& val1, Scalar2 const& val2) {
+    return val1 > val2;
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct MinOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return (val1 < val2 ? val1 : val2);
+  }
+  DESUL_FORCEINLINE_FUNCTION
+  static constexpr bool check_early_exit(Scalar1 const& val1, Scalar2 const& val2) {
+    return val1 < val2;
+  }
+};
+
+template <typename Op, typename Scalar1, typename Scalar2, typename = bool>
+struct may_exit_early : std::false_type {};
+
+// This early-exit optimization causes weird compiler errors with MSVC 2019
+#ifndef DESUL_HAVE_MSVC_ATOMICS
+template <typename Op, typename Scalar1, typename Scalar2>
+struct may_exit_early<Op,
+                      Scalar1,
+                      Scalar2,
+                      decltype(Op::check_early_exit(std::declval<Scalar1 const&>(),
+                                                    std::declval<Scalar2 const&>()))>
+    : std::true_type {};
+#endif
+
+template <typename Op, typename Scalar1, typename Scalar2>
+constexpr DESUL_FUNCTION
+    typename std::enable_if<may_exit_early<Op, Scalar1, Scalar2>::value, bool>::type
+    check_early_exit(Op const&, Scalar1 const& val1, Scalar2 const& val2) {
+  return Op::check_early_exit(val1, val2);
+}
+
+template <typename Op, typename Scalar1, typename Scalar2>
+constexpr DESUL_FUNCTION
+    typename std::enable_if<!may_exit_early<Op, Scalar1, Scalar2>::value, bool>::type
+    check_early_exit(Op const&, Scalar1 const&, Scalar2 const&) {
+  return false;
+}
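+
+// Operators that provide check_early_exit (MaxOper and MinOper above) let the
+// compare-exchange loops further down return without writing at all when the
+// stored value already dominates the operand. Sketch of the effect
+// (hypothetical caller):
+//
+//   int x = 10;
+//   // observes 10 > 5 and exits early: no CAS, no cache-line invalidation
+//   desul::atomic_fetch_max(
+//       &x, 5, desul::MemoryOrderRelaxed(), desul::MemoryScopeDevice());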
+
+template <class Scalar1, class Scalar2>
+struct AddOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 + val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct SubOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 - val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct MulOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 * val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct DivOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 / val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct ModOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 % val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct AndOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 & val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct OrOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 | val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct XorOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) { return val1 ^ val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct NandOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return ~(val1 & val2);
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct LShiftOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return val1 << val2;
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct RShiftOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return val1 >> val2;
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct IncModOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return ((val1 >= val2) ? Scalar1(0) : val1 + Scalar1(1));
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct DecModOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2& val2) {
+    return (((val1 == Scalar1(0)) | (val1 > val2)) ? val2 : (val1 - Scalar1(1)));
+  }
+};
+
+template <class Scalar1, class Scalar2>
+struct StoreOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1&, const Scalar2& val2) { return val2; }
+};
+
+template <class Scalar1, class Scalar2>
+struct LoadOper {
+  DESUL_FORCEINLINE_FUNCTION
+  static Scalar1 apply(const Scalar1& val1, const Scalar2&) { return val1; }
+};
+
+template <class Oper,
+          typename T,
+          class MemoryOrder,
+          class MemoryScope,
+          // equivalent to:
+          //   requires atomic_always_lock_free(sizeof(T))
+          std::enable_if_t<atomic_always_lock_free(sizeof(T)), int> = 0>
+DESUL_INLINE_FUNCTION T atomic_fetch_oper(const Oper& op,
+                                          T* const dest,
+                                          dont_deduce_this_parameter_t<const T> val,
+                                          MemoryOrder order,
+                                          MemoryScope scope) {
+  using cas_t = typename atomic_compare_exchange_type<sizeof(T)>::type;
+  cas_t oldval = reinterpret_cast<cas_t&>(*dest);
+  cas_t assume = oldval;
+
+  do {
+    if (Impl::check_early_exit(op, reinterpret_cast<T&>(oldval), val))
+      return reinterpret_cast<T&>(oldval);
+    assume = oldval;
+    T newval = op.apply(reinterpret_cast<T&>(assume), val);
+    oldval = desul::atomic_compare_exchange(reinterpret_cast<cas_t*>(dest),
+                                            assume,
+                                            reinterpret_cast<cas_t&>(newval),
+                                            order,
+                                            scope);
+  } while (assume != oldval);
+
+  return reinterpret_cast<T&>(oldval);
+}
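+
+// The loop above is the classic CAS retry pattern: read the current bits,
+// compute op(old, val), and try to publish the result, retrying whenever
+// another thread raced in between. Ignoring the bit reinterpretation, it is
+// an atomic version of this sketch:
+//
+//   T fetch_oper_sketch(const Oper& op, T* dest, T val) {
+//     T old = *dest;
+//     *dest = op.apply(old, val);
+//     return old;  // fetch-op returns the value before the operation
+//   }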
+
+template <class Oper,
+          typename T,
+          class MemoryOrder,
+          class MemoryScope,
+          // equivalent to:
+          //   requires atomic_always_lock_free(sizeof(T))
+          std::enable_if_t<atomic_always_lock_free(sizeof(T)), int> = 0>
+DESUL_INLINE_FUNCTION T atomic_oper_fetch(const Oper& op,
+                                          T* const dest,
+                                          dont_deduce_this_parameter_t<const T> val,
+                                          MemoryOrder order,
+                                          MemoryScope scope) {
+  using cas_t = typename atomic_compare_exchange_type<sizeof(T)>::type;
+  cas_t oldval = reinterpret_cast<cas_t&>(*dest);
+  T newval = val;
+  cas_t assume = oldval;
+  do {
+    if (Impl::check_early_exit(op, reinterpret_cast<T&>(oldval), val))
+      return reinterpret_cast<T&>(oldval);
+    assume = oldval;
+    newval = op.apply(reinterpret_cast<T&>(assume), val);
+    oldval = desul::atomic_compare_exchange(reinterpret_cast<cas_t*>(dest),
+                                            assume,
+                                            reinterpret_cast<cas_t&>(newval),
+                                            order,
+                                            scope);
+  } while (assume != oldval);
+
+  return newval;
+}
+
+template <class Oper,
+          typename T,
+          class MemoryOrder,
+          class MemoryScope,
+          // equivalent to:
+          //   requires !atomic_always_lock_free(sizeof(T))
+          std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
+DESUL_INLINE_FUNCTION T atomic_fetch_oper(const Oper& op,
+                                          T* const dest,
+                                          dont_deduce_this_parameter_t<const T> val,
+                                          MemoryOrder /*order*/,
+                                          MemoryScope scope) {
+#if defined(DESUL_HAVE_FORWARD_PROGRESS)
+  // Acquire a lock for the address
+  while (!Impl::lock_address((void*)dest, scope)) {
+  }
+
+  atomic_thread_fence(MemoryOrderAcquire(), scope);
+  T return_val = *dest;
+  *dest = op.apply(return_val, val);
+  atomic_thread_fence(MemoryOrderRelease(), scope);
+  Impl::unlock_address((void*)dest, scope);
+  return return_val;
+#elif defined(DESUL_HAVE_GPU_LIKE_PROGRESS)
+  // Avoid deadlock within a warp or wavefront: only the lanes that acquire
+  // the per-address lock perform the update, and the ballot tracks when
+  // every active lane has finished.
+  T return_val;
+  int done = 0;
+#ifdef __HIPCC__
+  unsigned long long int active = DESUL_IMPL_BALLOT_MASK(1);
+  unsigned long long int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_hip((void*)dest, scope)) {
+        atomic_thread_fence(MemoryOrderAcquire(), scope);
+        return_val = *dest;
+        *dest = op.apply(return_val, val);
+        atomic_thread_fence(MemoryOrderRelease(), scope);
+        Impl::unlock_address_hip((void*)dest, scope);
+        done = 1;
+      }
+    }
+    done_active = DESUL_IMPL_BALLOT_MASK(done);
+  }
+  return return_val;
+// FIXME_SYCL not implemented
+#elif defined(__SYCL_DEVICE_ONLY__)
+  (void)op;
+  (void)dest;
+  (void)scope;
+  (void)return_val;
+  (void)done;
+
+  assert(false);
+  return val;
+#else
+  unsigned int mask = DESUL_IMPL_ACTIVEMASK;
+  unsigned int active = DESUL_IMPL_BALLOT_MASK(mask, 1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_cuda((void*)dest, scope)) {
+        atomic_thread_fence(MemoryOrderAcquire(), scope);
+        return_val = *dest;
+        *dest = op.apply(return_val, val);
+        atomic_thread_fence(MemoryOrderRelease(), scope);
+        Impl::unlock_address_cuda((void*)dest, scope);
+        done = 1;
+      }
+    }
+    done_active = DESUL_IMPL_BALLOT_MASK(mask, done);
+  }
+  return return_val;
+#endif
+#else
+  static_assert(false, "Unimplemented lock based atomic\n");
+  return val;
+#endif
+}
+
+template <class Oper,
+          typename T,
+          class MemoryOrder,
+          class MemoryScope,
+          // equivalent to:
+          //   requires !atomic_always_lock_free(sizeof(T))
+          std::enable_if_t<!atomic_always_lock_free(sizeof(T)), int> = 0>
+DESUL_INLINE_FUNCTION T atomic_oper_fetch(const Oper& op,
+                                          T* const dest,
+                                          dont_deduce_this_parameter_t<const T> val,
+                                          MemoryOrder /*order*/,
+                                          MemoryScope scope) {
+#if defined(DESUL_HAVE_FORWARD_PROGRESS)
+  // Acquire a lock for the address
+  while (!Impl::lock_address((void*)dest, scope)) {
+  }
+
+  atomic_thread_fence(MemoryOrderAcquire(), scope);
+  T return_val = op.apply(*dest, val);
+  *dest = return_val;
+  atomic_thread_fence(MemoryOrderRelease(), scope);
+  Impl::unlock_address((void*)dest, scope);
+  return return_val;
+#elif defined(DESUL_HAVE_GPU_LIKE_PROGRESS)
+  // Avoid deadlock within a warp or wavefront: only the lanes that acquire
+  // the per-address lock perform the update, and the ballot tracks when
+  // every active lane has finished.
+  T return_val;
+  int done = 0;
+#ifdef __HIPCC__
+  unsigned long long int active = DESUL_IMPL_BALLOT_MASK(1);
+  unsigned long long int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_hip((void*)dest, scope)) {
+        atomic_thread_fence(MemoryOrderAcquire(), scope);
+        return_val = op.apply(*dest, val);
+        *dest = return_val;
+        atomic_thread_fence(MemoryOrderRelease(), scope);
+        Impl::unlock_address_hip((void*)dest, scope);
+        done = 1;
+      }
+    }
+    done_active = DESUL_IMPL_BALLOT_MASK(done);
+  }
+  return return_val;
+  // FIXME_SYCL not implemented
+#elif defined(__SYCL_DEVICE_ONLY__)
+  (void)op;
+  (void)dest;
+  (void)scope;
+  (void)done;
+
+  assert(false);
+  return val;
+#else
+  unsigned int mask = DESUL_IMPL_ACTIVEMASK;
+  unsigned int active = DESUL_IMPL_BALLOT_MASK(mask, 1);
+  unsigned int done_active = 0;
+  while (active != done_active) {
+    if (!done) {
+      if (Impl::lock_address_cuda((void*)dest, scope)) {
+        atomic_thread_fence(MemoryOrderAcquire(), scope);
+        return_val = op.apply(*dest, val);
+        *dest = return_val;
+        atomic_thread_fence(MemoryOrderRelease(), scope);
+        Impl::unlock_address_cuda((void*)dest, scope);
+        done = 1;
+      }
+    }
+    done_active = DESUL_IMPL_BALLOT_MASK(mask, done);
+  }
+  return return_val;
+#endif
+#else
+  static_assert(false, "Unimplemented lock based atomic\n");
+  return val;
+#endif
+}
+
+template <class Oper, typename T, class MemoryOrder>
+DESUL_INLINE_FUNCTION T atomic_fetch_oper(const Oper& op,
+                                          T* const dest,
+                                          dont_deduce_this_parameter_t<const T> val,
+                                          MemoryOrder /*order*/,
+                                          MemoryScopeCaller /*scope*/) {
+  T oldval = *dest;
+  *dest = op.apply(oldval, val);
+  return oldval;
+}
+
+template <class Oper, typename T, class MemoryOrder>
+DESUL_INLINE_FUNCTION T atomic_oper_fetch(const Oper& op,
+                                          T* const dest,
+                                          dont_deduce_this_parameter_t<const T> val,
+                                          MemoryOrder /*order*/,
+                                          MemoryScopeCaller /*scope*/) {
+  T oldval = *dest;
+  T newval = op.apply(oldval, val);
+  *dest = newval;
+  return newval;
+}
+
+}  // namespace Impl
+}  // namespace desul
+
+namespace desul {
+
+// Fetch_Oper atomics: return value before operation
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_add(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_fetch_oper(Impl::AddOper<T, const T>(), dest, val, order, scope);
+}
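+
+// atomic_fetch_add above, like the remaining fetch-op wrappers below, simply
+// selects an operator functor and forwards to Impl::atomic_fetch_oper. A
+// minimal usage sketch (hypothetical caller, not part of this header):
+//
+//   double d = 1.5;
+//   // also works for floating-point types, not just integers
+//   double old = desul::atomic_fetch_add(
+//       &d, 2.5, desul::MemoryOrderRelaxed(), desul::MemoryScopeDevice());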
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_sub(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_fetch_oper(Impl::SubOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_max(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_fetch_oper(Impl::MaxOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_min(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_fetch_oper(Impl::MinOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_mul(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_fetch_oper(Impl::MulOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_div(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_fetch_oper(Impl::DivOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_mod(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_fetch_oper(Impl::ModOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_and(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_fetch_oper(Impl::AndOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_or(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_fetch_oper(Impl::OrOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_xor(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_fetch_oper(Impl::XorOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_nand(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_fetch_oper(Impl::NandOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_fetch_lshift(T* const dest,
+                                            const unsigned int val,
+                                            MemoryOrder order,
+                                            MemoryScope scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::LShiftOper<T, const unsigned int>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_fetch_rshift(T* const dest,
+                                            const unsigned int val,
+                                            MemoryOrder order,
+                                            MemoryScope scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::RShiftOper<T, const unsigned int>(), dest, val, order, scope);
+}
+
+// Oper Fetch atomics: return value after operation
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_add_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_oper_fetch(Impl::AddOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_sub_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_oper_fetch(Impl::SubOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_max_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_oper_fetch(Impl::MaxOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_min_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_oper_fetch(Impl::MinOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_mul_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_oper_fetch(Impl::MulOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_div_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_oper_fetch(Impl::DivOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_mod_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_oper_fetch(Impl::ModOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_and_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_oper_fetch(Impl::AndOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_or_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_oper_fetch(Impl::OrOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_xor_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_oper_fetch(Impl::XorOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_nand_fetch(T* const dest, const T val, MemoryOrder order, MemoryScope scope) {
+  return Impl::atomic_oper_fetch(Impl::NandOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_lshift_fetch(T* const dest,
+                                            const unsigned int val,
+                                            MemoryOrder order,
+                                            MemoryScope scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::LShiftOper<T, const unsigned int>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_rshift_fetch(T* const dest,
+                                            const unsigned int val,
+                                            MemoryOrder order,
+                                            MemoryScope scope) {
+  return Impl::atomic_oper_fetch(
+      Impl::RShiftOper<T, const unsigned int>(), dest, val, order, scope);
+}
+
+// Other atomics
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_load(const T* const dest,
+                                    MemoryOrder order,
+                                    MemoryScope scope) {
+  return Impl::atomic_fetch_oper(
+      Impl::LoadOper<T, const T>(), const_cast<T*>(dest), T(), order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_store(T* const dest,
+                                        const T val,
+                                        MemoryOrder order,
+                                        MemoryScope scope) {
+  (void)Impl::atomic_fetch_oper(Impl::StoreOper<T, const T>(), dest, val, order, scope);
+}
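+
+// atomic_load and atomic_store reuse the same machinery: LoadOper writes back
+// the value it just read (a CAS of old against old), while StoreOper discards
+// it. Sketch of the observable behavior (hypothetical caller):
+//
+//   int flag = 0;
+//   desul::atomic_store(
+//       &flag, 1, desul::MemoryOrderRelaxed(), desul::MemoryScopeDevice());
+//   int seen = desul::atomic_load(
+//       &flag, desul::MemoryOrderRelaxed(), desul::MemoryScopeDevice());
+//   // seen == 1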
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_add(T* const dest,
+                                      const T val,
+                                      MemoryOrder order,
+                                      MemoryScope scope) {
+  (void)atomic_fetch_add(dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_sub(T* const dest,
+                                      const T val,
+                                      MemoryOrder order,
+                                      MemoryScope scope) {
+  (void)atomic_fetch_sub(dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_mul(T* const dest,
+                                      const T val,
+                                      MemoryOrder order,
+                                      MemoryScope scope) {
+  (void)atomic_fetch_mul(dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_div(T* const dest,
+                                      const T val,
+                                      MemoryOrder order,
+                                      MemoryScope scope) {
+  (void)atomic_fetch_div(dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_min(T* const dest,
+                                      const T val,
+                                      MemoryOrder order,
+                                      MemoryScope scope) {
+  (void)atomic_fetch_min(dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_max(T* const dest,
+                                      const T val,
+                                      MemoryOrder order,
+                                      MemoryScope scope) {
+  (void)atomic_fetch_max(dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_inc_fetch(T* const dest,
+                                         MemoryOrder order,
+                                         MemoryScope scope) {
+  return atomic_add_fetch(dest, T(1), order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_dec_fetch(T* const dest,
+                                         MemoryOrder order,
+                                         MemoryScope scope) {
+  return atomic_sub_fetch(dest, T(1), order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_fetch_inc(T* const dest,
+                                         MemoryOrder order,
+                                         MemoryScope scope) {
+  return atomic_fetch_add(dest, T(1), order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_inc_mod(T* const dest, T val, MemoryOrder order, MemoryScope scope) {
+  static_assert(std::is_unsigned<T>::value,
+                "Signed types not supported by atomic_fetch_inc_mod.");
+  return Impl::atomic_fetch_oper(
+      Impl::IncModOper<T, const T>(), dest, val, order, scope);
+}
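+
+// Illustrative sketch, not part of the original source: because the old value
+// wraps to zero once it reaches val, atomic_fetch_inc_mod suits a ring-buffer
+// write index. ring_head, capacity, and produce below are hypothetical:
+//
+//   unsigned int slot = atomic_fetch_inc_mod(
+//       &ring_head, capacity - 1u, MemoryOrderRelaxed(), MemoryScopeDevice());
+//   buffer[slot] = produce();  // slot cycles through 0 .. capacity - 1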
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T atomic_fetch_dec(T* const dest,
+                                         MemoryOrder order,
+                                         MemoryScope scope) {
+  return atomic_fetch_sub(dest, T(1), order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION T
+atomic_fetch_dec_mod(T* const dest, T val, MemoryOrder order, MemoryScope scope) {
+  static_assert(std::is_unsigned<T>::value,
+                "Signed types not supported by atomic_fetch_dec_mod.");
+  return Impl::atomic_fetch_oper(
+      Impl::DecModOper<T, const T>(), dest, val, order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_inc(T* const dest,
+                                      MemoryOrder order,
+                                      MemoryScope scope) {
+  return atomic_add(dest, T(1), order, scope);
+}
+
+template <typename T, class MemoryOrder, class MemoryScope>
+DESUL_INLINE_FUNCTION void atomic_dec(T* const dest,
+                                      MemoryOrder order,
+                                      MemoryScope scope) {
+  return atomic_sub(dest, T(1), order, scope);
+}
+
+// FIXME
+template <typename T,
+          class SuccessMemoryOrder,
+          class FailureMemoryOrder,
+          class MemoryScope>
+DESUL_INLINE_FUNCTION bool atomic_compare_exchange_strong(
+    T* const dest,
+    T& expected,
+    T desired,
+    SuccessMemoryOrder success,
+    FailureMemoryOrder /*failure*/,
+    MemoryScope scope) {
+  T const old = atomic_compare_exchange(dest, expected, desired, success, scope);
+  if (old != expected) {
+    expected = old;
+    return false;
+  } else {
+    return true;
+  }
+}
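+
+// Illustrative sketch, not part of the original source: updating expected on
+// failure supports the usual compare-exchange retry loop; the doubling update
+// below is a hypothetical payload:
+//
+//   T expected = atomic_load(dest, MemoryOrderRelaxed(), MemoryScopeDevice());
+//   while (!atomic_compare_exchange_strong(
+//       dest, expected, expected * T(2),
+//       MemoryOrderSeqCst(), MemoryOrderSeqCst(), MemoryScopeDevice())) {
+//     // expected now holds the value observed in *dest; retry with it
+//   }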
+
+template <typename T,
+          class SuccessMemoryOrder,
+          class FailureMemoryOrder,
+          class MemoryScope>
+DESUL_INLINE_FUNCTION bool atomic_compare_exchange_weak(T* const dest,
+                                                        T& expected,
+                                                        T desired,
+                                                        SuccessMemoryOrder success,
+                                                        FailureMemoryOrder failure,
+                                                        MemoryScope scope) {
+  return atomic_compare_exchange_strong(
+      dest, expected, desired, success, failure, scope);
+}
+
+}  // namespace desul
+
+#include <desul/atomics/CUDA.hpp>
+#include <desul/atomics/GCC.hpp>
+#include <desul/atomics/HIP.hpp>
+#include <desul/atomics/OpenMP.hpp>
+#include <desul/atomics/SYCL.hpp>
+#if defined(__GNUC__) && (!defined(__clang__))
+#pragma GCC diagnostic pop
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/HIP.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/HIP.hpp
new file mode 100644 (file)
index 0000000..e51406e
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+#ifndef DESUL_ATOMICS_HIP_HPP_
+#define DESUL_ATOMICS_HIP_HPP_
+
+#ifdef __HIP_DEVICE_COMPILE__
+namespace desul {
+
+// This header file is organized as follows:
+//   1/ device-side overload set from atomic functions provided by HIP
+//   2/ fallback implementation on host-side for atomic functions defined in 1/ that are
+//      not included in the GCC overload set
+//   3/ fallback implementation on device-side for atomic functions from the GCC
+//      overload set that are not defined in 1/
+
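+// Illustrative sketch, not part of the original source: a relaxed,
+// device-scope call resolves directly to the HIP builtin from 1/, e.g. for a
+// hypothetical device-memory counter:
+//
+//   int old = atomic_fetch_add(counter, 1, MemoryOrderRelaxed(),
+//                              MemoryScopeDevice());  // -> atomicAdd(counter, 1)
+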
+// clang-format off
+inline __device__                int atomic_fetch_add(               int* ptr,                int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
+inline __device__       unsigned int atomic_fetch_add(      unsigned int* ptr,       unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
+inline __device__ unsigned long long atomic_fetch_add(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
+inline __device__              float atomic_fetch_add(             float* ptr,              float val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
+inline __device__             double atomic_fetch_add(            double* ptr,             double val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, val); }
+
+inline __device__                int atomic_fetch_sub(               int* ptr,                int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicSub(ptr, val); }
+inline __device__       unsigned int atomic_fetch_sub(      unsigned int* ptr,       unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicSub(ptr, val); }
+inline __device__ unsigned long long atomic_fetch_sub(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, -val); }
+inline __device__              float atomic_fetch_sub(             float* ptr,              float val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, -val); }
+inline __device__             double atomic_fetch_sub(            double* ptr,             double val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, -val); }
+
+inline __device__                int atomic_fetch_min(               int* ptr,                int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMin(ptr, val); }
+inline __device__       unsigned int atomic_fetch_min(      unsigned int* ptr,       unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMin(ptr, val); }
+inline __device__ unsigned long long atomic_fetch_min(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMin(ptr, val); }
+
+inline __device__                int atomic_fetch_max(               int* ptr,                int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMax(ptr, val); }
+inline __device__       unsigned int atomic_fetch_max(      unsigned int* ptr,       unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMax(ptr, val); }
+inline __device__ unsigned long long atomic_fetch_max(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicMax(ptr, val); }
+
+inline __device__                int atomic_fetch_and(               int* ptr,                int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAnd(ptr, val); }
+inline __device__       unsigned int atomic_fetch_and(      unsigned int* ptr,       unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAnd(ptr, val); }
+inline __device__ unsigned long long atomic_fetch_and(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAnd(ptr, val); }
+
+inline __device__                int atomic_fetch_or (               int* ptr,                int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicOr (ptr, val); }
+inline __device__       unsigned int atomic_fetch_or (      unsigned int* ptr,       unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicOr (ptr, val); }
+inline __device__ unsigned long long atomic_fetch_or (unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicOr (ptr, val); }
+
+inline __device__                int atomic_fetch_xor(               int* ptr,                int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicXor(ptr, val); }
+inline __device__       unsigned int atomic_fetch_xor(      unsigned int* ptr,       unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicXor(ptr, val); }
+inline __device__ unsigned long long atomic_fetch_xor(unsigned long long* ptr, unsigned long long val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicXor(ptr, val); }
+
+inline __device__                int atomic_fetch_inc(               int* ptr,                         MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, 1   ); }
+inline __device__       unsigned int atomic_fetch_inc(      unsigned int* ptr,                         MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, 1u  ); }
+inline __device__ unsigned long long atomic_fetch_inc(unsigned long long* ptr,                         MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, 1ull); }
+
+inline __device__                int atomic_fetch_dec(               int* ptr,                         MemoryOrderRelaxed, MemoryScopeDevice) { return atomicSub(ptr, 1   ); }
+inline __device__       unsigned int atomic_fetch_dec(      unsigned int* ptr,                         MemoryOrderRelaxed, MemoryScopeDevice) { return atomicSub(ptr, 1u  ); }
+inline __device__ unsigned long long atomic_fetch_dec(unsigned long long* ptr,                         MemoryOrderRelaxed, MemoryScopeDevice) { return atomicAdd(ptr, -1  ); }
+
+inline __device__       unsigned int atomic_fetch_inc_mod(  unsigned int* ptr,       unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicInc(ptr, val); }
+inline __device__       unsigned int atomic_fetch_dec_mod(  unsigned int* ptr,       unsigned int val, MemoryOrderRelaxed, MemoryScopeDevice) { return atomicDec(ptr, val); }
+// clang-format on
+
+#define DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(OP, TYPE)                         \
+  template <class MemoryOrder>                                                  \
+  inline __device__ TYPE atomic_fetch_##OP(                                     \
+      TYPE* ptr, TYPE val, MemoryOrder, MemoryScopeDevice) {                    \
+    __threadfence();                                                            \
+    TYPE return_val =                                                           \
+        atomic_fetch_##OP(ptr, val, MemoryOrderRelaxed(), MemoryScopeDevice()); \
+    __threadfence();                                                            \
+    return return_val;                                                          \
+  }                                                                             \
+  template <class MemoryOrder>                                                  \
+  inline __device__ TYPE atomic_fetch_##OP(                                     \
+      TYPE* ptr, TYPE val, MemoryOrder, MemoryScopeCore) {                      \
+    return atomic_fetch_##OP(ptr, val, MemoryOrder(), MemoryScopeDevice());     \
+  }
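+
+// Illustrative sketch, not part of the original source: for orders stronger
+// than relaxed, the macro above brackets the relaxed hardware atomic with
+// fences, so e.g. the (min, int) instantiation behaves like
+//
+//   __threadfence();
+//   int old = atomic_fetch_min(ptr, val, MemoryOrderRelaxed(), MemoryScopeDevice());
+//   __threadfence();
+//   return old;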
+
+#define DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(OP) \
+  DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(OP, int)           \
+  DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(OP, unsigned int)  \
+  DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(OP, unsigned long long)
+
+#define DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT(OP) \
+  DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(OP, float)               \
+  DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(OP, double)
+
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(min)
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(max)
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(and)
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(or)
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(xor)
+
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT(add)
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(add)
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT(sub)
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(sub)
+
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(inc)
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL(dec)
+
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(inc_mod, unsigned int)
+DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP(dec_mod, unsigned int)
+
+#undef DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_FLOATING_POINT
+#undef DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP_INTEGRAL
+#undef DESUL_IMPL_HIP_DEVICE_ATOMIC_FETCH_OP
+
+// 2/ host-side fallback implementation for atomic functions not provided by GCC
+
+#define DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(OP_LOWERCASE, OP_PASCAL_CASE, TYPE) \
+  template <class MemoryOrder>                                                      \
+  inline __host__ TYPE atomic_fetch_##OP_LOWERCASE(                                 \
+      TYPE* ptr, TYPE val, MemoryOrder order, MemoryScopeDevice scope) {            \
+    return Impl::atomic_fetch_oper(                                                 \
+        Impl::OP_PASCAL_CASE##Oper<TYPE, const TYPE>(), ptr, val, order, scope);    \
+  }                                                                                 \
+  template <class MemoryOrder>                                                      \
+  inline __host__ TYPE atomic_fetch_##OP_LOWERCASE(                                 \
+      TYPE* ptr, TYPE val, MemoryOrder order, MemoryScopeCore scope) {              \
+    return Impl::atomic_fetch_oper(                                                 \
+        Impl::OP_PASCAL_CASE##Oper<TYPE, const TYPE>(), ptr, val, order, scope);    \
+  }
+
+#define DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_INTEGRAL(OP_LOWERCASE, OP_PASCAL_CASE) \
+  DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(OP_LOWERCASE, OP_PASCAL_CASE, int)           \
+  DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(OP_LOWERCASE, OP_PASCAL_CASE, unsigned int)  \
+  DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(                                             \
+      OP_LOWERCASE, OP_PASCAL_CASE, unsigned long long)
+
+#define DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_FLOATING_POINT(OP_LOWERCASE,   \
+                                                               OP_PASCAL_CASE) \
+  DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(OP_LOWERCASE, OP_PASCAL_CASE, float) \
+  DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(OP_LOWERCASE, OP_PASCAL_CASE, double)
+
+DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_INTEGRAL(min, Min)
+DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_INTEGRAL(max, Max)
+DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_FLOATING_POINT(add, Add)
+DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_FLOATING_POINT(sub, Sub)
+
+DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(inc_mod, IncMod, unsigned int)
+DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN(dec_mod, DecMod, unsigned int)
+
+#undef DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_FLOATING_POINT
+#undef DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN_INTEGRAL
+#undef DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_FUN
+
+#define DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_INCREMENT_DECREMENT(TYPE) \
+  template <class MemoryOrder>                                        \
+  inline __host__ TYPE atomic_fetch_inc(                              \
+      TYPE* ptr, MemoryOrder order, MemoryScopeDevice scope) {        \
+    return atomic_fetch_add(ptr, static_cast<TYPE>(1), order, scope); \
+  }                                                                   \
+  template <class MemoryOrder>                                        \
+  inline __host__ TYPE atomic_fetch_inc(                              \
+      TYPE* ptr, MemoryOrder order, MemoryScopeCore scope) {          \
+    return atomic_fetch_add(ptr, static_cast<TYPE>(1), order, scope); \
+  }                                                                   \
+  template <class MemoryOrder>                                        \
+  inline __host__ TYPE atomic_fetch_dec(                              \
+      TYPE* ptr, MemoryOrder order, MemoryScopeDevice scope) {        \
+    return atomic_fetch_sub(ptr, static_cast<TYPE>(1), order, scope); \
+  }                                                                   \
+  template <class MemoryOrder>                                        \
+  inline __host__ TYPE atomic_fetch_dec(                              \
+      TYPE* ptr, MemoryOrder order, MemoryScopeCore scope) {          \
+    return atomic_fetch_sub(ptr, static_cast<TYPE>(1), order, scope); \
+  }
+
+DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_INCREMENT_DECREMENT(int)
+DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_INCREMENT_DECREMENT(unsigned int)
+DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_INCREMENT_DECREMENT(unsigned long long)
+
+#undef DESUL_IMPL_HIP_HOST_FALLBACK_ATOMIC_INCREMENT_DECREMENT
+
+// 3/ device-side fallback implementation for atomic functions defined in GCC overload
+// set
+
+#define DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(             \
+    OP_LOWERCASE, OP_PASCAL_CASE, MEMORY_ORDER, MEMORY_SCOPE)              \
+  template <class T>                                                       \
+  inline __device__ std::enable_if_t<std::is_integral<T>::value, T>        \
+      atomic_##OP_LOWERCASE##_fetch(                                       \
+          T* ptr, T val, MEMORY_ORDER order, MEMORY_SCOPE scope) {         \
+    return Impl::atomic_oper_fetch(                                        \
+        Impl::OP_PASCAL_CASE##Oper<T, const T>(), ptr, val, order, scope); \
+  }                                                                        \
+  template <class T>                                                       \
+  inline __device__ std::enable_if_t<std::is_integral<T>::value, T>        \
+      atomic_fetch_##OP_LOWERCASE(                                         \
+          T* ptr, T val, MEMORY_ORDER order, MEMORY_SCOPE scope) {         \
+    return Impl::atomic_fetch_oper(                                        \
+        Impl::OP_PASCAL_CASE##Oper<T, const T>(), ptr, val, order, scope); \
+  }
+
+// clang-format off
+#define DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(OP_LOWERCASE, OP_PASCAL_CASE) \
+  DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(OP_LOWERCASE, OP_PASCAL_CASE, MemoryOrderRelaxed, MemoryScopeNode) \
+  DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(OP_LOWERCASE, OP_PASCAL_CASE, MemoryOrderRelaxed, MemoryScopeDevice) \
+  DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(OP_LOWERCASE, OP_PASCAL_CASE, MemoryOrderRelaxed, MemoryScopeCore) \
+  DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(OP_LOWERCASE, OP_PASCAL_CASE, MemoryOrderSeqCst,  MemoryScopeNode) \
+  DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(OP_LOWERCASE, OP_PASCAL_CASE, MemoryOrderSeqCst,  MemoryScopeDevice) \
+  DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE(OP_LOWERCASE, OP_PASCAL_CASE, MemoryOrderSeqCst,  MemoryScopeCore)
+// clang-format on
+
+DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(add, Add)
+DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(sub, Sub)
+DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(and, And)
+DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(or, Or)
+DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(xor, Xor)
+DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN(nand, Nand)
+
+#undef DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN
+#undef DESUL_IMPL_HIP_DEVICE_FALLBACK_ATOMIC_FUN_ORDER_SCOPE
+
+}  // namespace desul
+
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Lock_Array.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Lock_Array.hpp
new file mode 100644 (file)
index 0000000..6b2d4e7
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_LOCK_ARRAY_HPP_
+#define DESUL_ATOMICS_LOCK_ARRAY_HPP_
+
+#include "desul/atomics/Compare_Exchange.hpp"
+#include "desul/atomics/Lock_Array_Cuda.hpp"
+#include "desul/atomics/Lock_Array_HIP.hpp"
+#include "desul/atomics/Macros.hpp"
+
+namespace desul {
+namespace Impl {
+struct host_locks__ {
+  static constexpr uint32_t HOST_SPACE_ATOMIC_MASK = 0xFFFF;
+  static constexpr uint32_t HOST_SPACE_ATOMIC_XOR_MASK = 0x5A39;
+  template <typename is_always_void = void>
+  static int32_t* get_host_locks_() {
+    static int32_t HOST_SPACE_ATOMIC_LOCKS_DEVICE[HOST_SPACE_ATOMIC_MASK + 1] = {0};
+    return HOST_SPACE_ATOMIC_LOCKS_DEVICE;
+  }
+  static inline int32_t* get_host_lock_(void* ptr) {
+    return &get_host_locks_()[((uint64_t(ptr) >> 2) & HOST_SPACE_ATOMIC_MASK) ^
+                              HOST_SPACE_ATOMIC_XOR_MASK];
+  }
+};
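+
+// Illustrative note, not part of the original source: the lock index is a
+// hash of the address, ((uint64_t(ptr) >> 2) & 0xFFFF) ^ 0x5A39, so distinct
+// addresses may share a lock; correctness only requires that the same address
+// always map to the same lock.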
+
+inline void init_lock_arrays() {
+  static bool is_initialized = false;
+  if (!is_initialized) {
+    host_locks__::get_host_locks_();
+    is_initialized = true;
+  }
+
+#ifdef DESUL_HAVE_CUDA_ATOMICS
+  init_lock_arrays_cuda();
+#endif
+
+#ifdef DESUL_HAVE_HIP_ATOMICS
+  init_lock_arrays_hip();
+#endif
+}
+
+inline void finalize_lock_arrays() {
+#ifdef DESUL_HAVE_CUDA_ATOMICS
+  finalize_lock_arrays_cuda();
+#endif
+
+#ifdef DESUL_HAVE_HIP_ATOMICS
+  finalize_lock_arrays_hip();
+#endif
+}
+template <typename MemoryScope>
+inline bool lock_address(void* ptr, MemoryScope ms) {
+  return 0 ==
+         atomic_exchange(
+             host_locks__::get_host_lock_(ptr), int32_t(1), MemoryOrderSeqCst(), ms);
+}
+template <typename MemoryScope>
+void unlock_address(void* ptr, MemoryScope ms) {
+  (void)atomic_exchange(
+      host_locks__::get_host_lock_(ptr), int32_t(0), MemoryOrderSeqCst(), ms);
+}
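+
+// Illustrative sketch, not part of the original source: the lock array backs
+// lock-based atomics for types without hardware support; a caller spins until
+// the hashed lock for ptr is acquired:
+//
+//   while (!Impl::lock_address(ptr, MemoryScopeDevice())) {
+//   }  // spin until the hashed lock is free
+//   // ... read-modify-write *ptr non-atomically ...
+//   Impl::unlock_address(ptr, MemoryScopeDevice());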
+}  // namespace Impl
+}  // namespace desul
+
+#endif  // DESUL_ATOMICS_LOCK_ARRAY_HPP_
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Lock_Array_Cuda.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Lock_Array_Cuda.hpp
new file mode 100644 (file)
index 0000000..2166fa3
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_LOCK_ARRAY_CUDA_HPP_
+#define DESUL_ATOMICS_LOCK_ARRAY_CUDA_HPP_
+
+#include "desul/atomics/Common.hpp"
+#include "desul/atomics/Macros.hpp"
+
+#ifdef DESUL_HAVE_CUDA_ATOMICS
+
+#include <cstdint>
+
+namespace desul {
+namespace Impl {
+
+#ifdef __CUDA_ARCH__
+#define DESUL_IMPL_BALLOT_MASK(m, x) __ballot_sync(m, x)
+#define DESUL_IMPL_ACTIVEMASK __activemask()
+#else
+#define DESUL_IMPL_BALLOT_MASK(m, x) m == 0 ? 0 : 1
+#define DESUL_IMPL_ACTIVEMASK 0
+#endif
+
+/// \brief These global variables in Host space are the central definition
+///        of these arrays.
+extern int32_t* CUDA_SPACE_ATOMIC_LOCKS_DEVICE_h;
+extern int32_t* CUDA_SPACE_ATOMIC_LOCKS_NODE_h;
+
+/// \brief After this call, the g_host_cuda_lock_arrays variable has
+///        valid, initialized arrays.
+///
+/// This call is idempotent.
+/// The function is templated to make it a weak symbol, so that a snapshotted
+///   copy of Desul inside Kokkos/RAJA can coexist with linking against pure Desul.
+template <typename /*AlwaysInt*/ = int>
+void init_lock_arrays_cuda();
+
+/// \brief After this call, the g_host_cuda_lock_arrays variable has
+///        all null pointers, and all array memory has been freed.
+///
+/// This call is idempotent.
+/// The function is templated to make it a weak symbol, so that a snapshotted
+///   copy of Desul inside Kokkos/RAJA can coexist with linking against pure Desul.
+template <typename /*AlwaysInt*/ = int>
+void finalize_lock_arrays_cuda();
+
+}  // namespace Impl
+}  // namespace desul
+
+#if defined(__CUDACC__)
+
+namespace desul {
+namespace Impl {
+
+/// \brief This global variable in CUDA space is what kernels use
+///        to get access to the lock arrays.
+///
+/// When relocatable device code is enabled, there can be one single
+/// instance of this global variable for the entire executable,
+/// whose definition will be in Kokkos_Cuda_Locks.cpp (and whose declaration
+/// here must then be extern).
+/// This one instance will be initialized by init_lock_arrays_cuda
+/// and need not be modified afterwards.
+///
+/// When relocatable device code is disabled, an instance of this variable
+/// will be created in every translation unit that sees this header file
+/// (we make this clear by marking it static, meaning no other translation
+///  unit can link to it).
+/// Since the Kokkos_Cuda_Locks.cpp translation unit cannot initialize the
+/// instances in other translation units, we must update this CUDA global
+/// variable based on the Host global variable prior to running any kernels
+/// that will use it.
+/// That is the purpose of the ensure_cuda_lock_arrays_on_device function.
+__device__
+#ifdef __CUDACC_RDC__
+    __constant__ extern
+#endif
+    int32_t* CUDA_SPACE_ATOMIC_LOCKS_DEVICE;
+
+__device__
+#ifdef __CUDACC_RDC__
+    __constant__ extern
+#endif
+    int32_t* CUDA_SPACE_ATOMIC_LOCKS_NODE;
+
+#define CUDA_SPACE_ATOMIC_MASK 0x1FFFF
+
+/// \brief Acquire a lock for the address
+///
+/// This function tries to acquire the lock for the hash value derived
+/// from the provided ptr. If the lock is successfully acquired the
+/// function returns true. Otherwise it returns false.
+__device__ inline bool lock_address_cuda(void* ptr, desul::MemoryScopeDevice) {
+  size_t offset = size_t(ptr);
+  offset = offset >> 2;
+  offset = offset & CUDA_SPACE_ATOMIC_MASK;
+  return (0 == atomicExch(&desul::Impl::CUDA_SPACE_ATOMIC_LOCKS_DEVICE[offset], 1));
+}
+__device__ inline bool lock_address_cuda(void* ptr, desul::MemoryScopeNode) {
+  size_t offset = size_t(ptr);
+  offset = offset >> 2;
+  offset = offset & CUDA_SPACE_ATOMIC_MASK;
+  return (0 == atomicExch(&desul::Impl::CUDA_SPACE_ATOMIC_LOCKS_NODE[offset], 1));
+}
+
+/// \brief Release lock for the address
+///
+/// This function releases the lock for the hash value derived
+/// from the provided ptr. This function should only be called
+/// after previously successfully acquiring a lock with
+/// lock_address.
+__device__ inline void unlock_address_cuda(void* ptr, desul::MemoryScopeDevice) {
+  size_t offset = size_t(ptr);
+  offset = offset >> 2;
+  offset = offset & CUDA_SPACE_ATOMIC_MASK;
+  atomicExch(&desul::Impl::CUDA_SPACE_ATOMIC_LOCKS_DEVICE[offset], 0);
+}
+__device__ inline void unlock_address_cuda(void* ptr, desul::MemoryScopeNode) {
+  size_t offset = size_t(ptr);
+  offset = offset >> 2;
+  offset = offset & CUDA_SPACE_ATOMIC_MASK;
+  atomicExch(&desul::Impl::CUDA_SPACE_ATOMIC_LOCKS_NODE[offset], 0);
+}
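+
+// Illustrative sketch, not part of the original source: on the device only one
+// thread can hold a given hashed lock, so callers combine lock_address_cuda
+// with a retry loop (the critical section below is hypothetical):
+//
+//   bool done = false;
+//   while (!done) {
+//     if (lock_address_cuda(ptr, desul::MemoryScopeDevice())) {
+//       // ... critical section operating on *ptr ...
+//       unlock_address_cuda(ptr, desul::MemoryScopeDevice());
+//       done = true;
+//     }
+//   }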
+
+}  // namespace Impl
+}  // namespace desul
+
+// Give lock_array_copied explicit translation-unit scope
+namespace desul {
+namespace Impl {
+namespace {
+static int lock_array_copied = 0;
+inline int eliminate_warning_for_lock_array() { return lock_array_copied; }
+}  // namespace
+
+#ifdef __CUDACC_RDC__
+inline
+#else
+static
+#endif
+    void
+    copy_cuda_lock_arrays_to_device() {
+  if (lock_array_copied == 0) {
+    cudaMemcpyToSymbol(CUDA_SPACE_ATOMIC_LOCKS_DEVICE,
+                       &CUDA_SPACE_ATOMIC_LOCKS_DEVICE_h,
+                       sizeof(int32_t*));
+    cudaMemcpyToSymbol(CUDA_SPACE_ATOMIC_LOCKS_NODE,
+                       &CUDA_SPACE_ATOMIC_LOCKS_NODE_h,
+                       sizeof(int32_t*));
+  }
+  lock_array_copied = 1;
+}
+
+}  // namespace Impl
+}  // namespace desul
+
+#endif /* defined( __CUDACC__ ) */
+
+#endif /* defined( DESUL_HAVE_CUDA_ATOMICS ) */
+
+namespace desul {
+
+#if defined(__CUDACC_RDC__) || (!defined(__CUDACC__))
+inline void ensure_cuda_lock_arrays_on_device() {}
+#else
+static inline void ensure_cuda_lock_arrays_on_device() {
+  Impl::copy_cuda_lock_arrays_to_device();
+}
+#endif
+
+}  // namespace desul
+
+#endif /* #ifndef DESUL_ATOMICS_LOCK_ARRAY_CUDA_HPP_ */
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Lock_Array_HIP.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Lock_Array_HIP.hpp
new file mode 100644 (file)
index 0000000..7c843f2
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_LOCK_ARRAY_HIP_HPP_
+#define DESUL_ATOMICS_LOCK_ARRAY_HIP_HPP_
+
+#include "desul/atomics/Common.hpp"
+#include "desul/atomics/Macros.hpp"
+
+#ifdef DESUL_HAVE_HIP_ATOMICS
+
+#include <hip/hip_runtime.h>
+
+#include <cstdint>
+
+namespace desul {
+namespace Impl {
+
+#ifdef __HIP_DEVICE_COMPILE__
+#define DESUL_IMPL_BALLOT_MASK(x) __ballot(x)
+#else
+#define DESUL_IMPL_BALLOT_MASK(x) 0
+#endif
+
+/**
+ * \brief These global variables in Host space are the central definition of
+ * these arrays.
+ */
+extern int32_t* HIP_SPACE_ATOMIC_LOCKS_DEVICE_h;
+extern int32_t* HIP_SPACE_ATOMIC_LOCKS_NODE_h;
+
+/// \brief After this call, the g_host_hip_lock_arrays variable has
+///        valid, initialized arrays.
+///
+/// This call is idempotent.
+/// The function is templated to make it a weak symbol, so that a snapshotted
+///   copy of Desul inside Kokkos/RAJA can coexist with linking against pure Desul.
+template <typename /*AlwaysInt*/ = int>
+void init_lock_arrays_hip();
+
+/// \brief After this call, the g_host_hip_lock_arrays variable has
+///        all null pointers, and all array memory has been freed.
+///
+/// This call is idempotent.
+/// The function is templated to make it a weak symbol, so that a snapshotted
+///   copy of Desul inside Kokkos/RAJA can coexist with linking against pure Desul.
+template <typename /*AlwaysInt*/ = int>
+void finalize_lock_arrays_hip();
+}  // namespace Impl
+}  // namespace desul
+
+#ifdef __HIPCC__
+namespace desul {
+namespace Impl {
+
+/**
+ * \brief This global variable in HIP space is what kernels use to get access
+ * to the lock arrays.
+ *
+ * When relocatable device code is enabled, there can be one single instance of
+ * this global variable for the entire executable, whose definition will be in
+ * Kokkos_HIP_Locks.cpp (and whose declaration here must then be extern). This
+ * one instance will be initialized by init_lock_arrays_hip and need
+ * not be modified afterwards.
+ *
+ * When relocatable device code is disabled, an instance of this variable will
+ * be created in every translation unit that sees this header file (we make this
+ * clear by marking it static, meaning no other translation unit can link to
+ * it). Since the Kokkos_HIP_Locks.cpp translation unit cannot initialize the
+ * instances in other translation units, we must update this HIP global
+ * variable based on the Host global variable prior to running any kernels that
+ * will use it.  That is the purpose of the
+ * DESUL_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE macro.
+ */
+__device__
+#ifdef DESUL_HIP_RDC
+    __constant__ extern
+#endif
+    int32_t* HIP_SPACE_ATOMIC_LOCKS_DEVICE;
+
+__device__
+#ifdef DESUL_HIP_RDC
+    __constant__ extern
+#endif
+    int32_t* HIP_SPACE_ATOMIC_LOCKS_NODE;
+
+#define HIP_SPACE_ATOMIC_MASK 0x1FFFF
+
+/// \brief Acquire a lock for the address
+///
+/// This function tries to acquire the lock for the hash value derived
+/// from the provided ptr. If the lock is successfully acquired the
+/// function returns true. Otherwise it returns false.
+__device__ inline bool lock_address_hip(void* ptr, desul::MemoryScopeDevice) {
+  size_t offset = size_t(ptr);
+  offset = offset >> 2;
+  offset = offset & HIP_SPACE_ATOMIC_MASK;
+  return (0 == atomicExch(&desul::Impl::HIP_SPACE_ATOMIC_LOCKS_DEVICE[offset], 1));
+}
+
+__device__ inline bool lock_address_hip(void* ptr, desul::MemoryScopeNode) {
+  size_t offset = size_t(ptr);
+  offset = offset >> 2;
+  offset = offset & HIP_SPACE_ATOMIC_MASK;
+  return (0 == atomicExch(&desul::Impl::HIP_SPACE_ATOMIC_LOCKS_NODE[offset], 1));
+}
+
+/**
+ * \brief Release lock for the address
+ *
+ * This function releases the lock for the hash value derived from the provided
+ * ptr. This function should only be called after previously successfully
+ * acquiring a lock with lock_address.
+ */
+__device__ inline void unlock_address_hip(void* ptr, desul::MemoryScopeDevice) {
+  size_t offset = size_t(ptr);
+  offset = offset >> 2;
+  offset = offset & HIP_SPACE_ATOMIC_MASK;
+  atomicExch(&desul::Impl::HIP_SPACE_ATOMIC_LOCKS_DEVICE[offset], 0);
+}
+
+__device__ inline void unlock_address_hip(void* ptr, desul::MemoryScopeNode) {
+  size_t offset = size_t(ptr);
+  offset = offset >> 2;
+  offset = offset & HIP_SPACE_ATOMIC_MASK;
+  atomicExch(&desul::Impl::HIP_SPACE_ATOMIC_LOCKS_NODE[offset], 0);
+}
+#endif
+}  // namespace Impl
+}  // namespace desul
+
+// Give lock_array_copied explicit translation-unit scope
+namespace desul {
+namespace Impl {
+namespace {
+static int lock_array_copied = 0;
+inline int eliminate_warning_for_lock_array() { return lock_array_copied; }
+}  // namespace
+}  // namespace Impl
+}  // namespace desul
+
+/* It is critical that this code be a macro, so that it will
+   capture the right address for g_device_hip_lock_arrays!
+   Putting this in an inline function will NOT do the right thing! */
+#define DESUL_IMPL_COPY_HIP_LOCK_ARRAYS_TO_DEVICE()                                   \
+  {                                                                                   \
+    if (::desul::Impl::lock_array_copied == 0) {                                      \
+      (void)hipMemcpyToSymbol(                                                        \
+          HIP_SYMBOL(::desul::Impl::HIP_SPACE_ATOMIC_LOCKS_DEVICE),                   \
+          &::desul::Impl::HIP_SPACE_ATOMIC_LOCKS_DEVICE_h,                            \
+          sizeof(int32_t*));                                                          \
+      (void)hipMemcpyToSymbol(HIP_SYMBOL(::desul::Impl::HIP_SPACE_ATOMIC_LOCKS_NODE), \
+                              &::desul::Impl::HIP_SPACE_ATOMIC_LOCKS_NODE_h,          \
+                              sizeof(int32_t*));                                      \
+    }                                                                                 \
+    ::desul::Impl::lock_array_copied = 1;                                             \
+  }
+
+#endif
+
+#if defined(DESUL_HIP_RDC) || (!defined(__HIPCC__))
+#define DESUL_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE()
+#else
+#define DESUL_ENSURE_HIP_LOCK_ARRAYS_ON_DEVICE() \
+  DESUL_IMPL_COPY_HIP_LOCK_ARRAYS_TO_DEVICE()
+#endif
+
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Macros.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/Macros.hpp
new file mode 100644 (file)
index 0000000..0890b2d
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_MACROS_HPP_
+#define DESUL_ATOMICS_MACROS_HPP_
+
+// Macros
+
+#if (!defined(__CUDA_ARCH__) || !defined(__NVCC__)) &&                       \
+    (!defined(__HIP_DEVICE_COMPILE__) || !defined(__HIP_PLATFORM_HCC__)) &&  \
+    !defined(__SYCL_DEVICE_ONLY__) && !defined(DESUL_HAVE_OPENMP_ATOMICS) && \
+    !defined(DESUL_HAVE_SERIAL_ATOMICS)
+#define DESUL_IMPL_HAVE_GCC_OR_MSVC_ATOMICS
+#endif
+
+// ONLY use GNUC atomics if not compiling for the device
+// and we didn't explicitly say to use OPENMP or SERIAL atomics
+#if defined(__GNUC__) && defined(DESUL_IMPL_HAVE_GCC_OR_MSVC_ATOMICS)
+#define DESUL_HAVE_GCC_ATOMICS
+#endif
+
+// Equivalent to above: if we are compiling for the device we
+// need to use CUDA/HIP/SYCL atomics instead of MSVC atomics
+#if defined(_MSC_VER) && defined(DESUL_IMPL_HAVE_GCC_OR_MSVC_ATOMICS)
+#define DESUL_HAVE_MSVC_ATOMICS
+#endif
+
+#undef DESUL_IMPL_HAVE_GCC_OR_MSVC_ATOMICS
+
+#ifdef __CUDACC__
+#define DESUL_HAVE_CUDA_ATOMICS
+#endif
+
+#ifdef __HIPCC__
+#define DESUL_HAVE_HIP_ATOMICS
+#endif
+
+#ifdef __SYCL_DEVICE_ONLY__
+#define DESUL_HAVE_SYCL_ATOMICS
+#endif
+
+#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) || \
+    defined(__SYCL_DEVICE_ONLY__)
+#define DESUL_HAVE_GPU_LIKE_PROGRESS
+#endif
+
+#if defined(DESUL_HAVE_CUDA_ATOMICS) || defined(DESUL_HAVE_HIP_ATOMICS)
+#define DESUL_FORCEINLINE_FUNCTION inline __host__ __device__
+#define DESUL_INLINE_FUNCTION inline __host__ __device__
+#define DESUL_FUNCTION __host__ __device__
+#else
+#define DESUL_FORCEINLINE_FUNCTION inline
+#define DESUL_INLINE_FUNCTION inline
+#define DESUL_FUNCTION
+#endif
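+
+// Illustrative sketch, not part of the original source: with CUDA or HIP
+// atomics enabled, one definition marked with these macros compiles for both
+// host and device:
+//
+//   DESUL_INLINE_FUNCTION int twice(int x) { return 2 * x; }
+//   // expands to: inline __host__ __device__ int twice(int x) { ... }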
+
+#if !defined(DESUL_HAVE_GPU_LIKE_PROGRESS)
+#define DESUL_HAVE_FORWARD_PROGRESS
+#endif
+
+#endif  // DESUL_ATOMICS_MACROS_HPP_
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/OpenMP.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/OpenMP.hpp
new file mode 100644 (file)
index 0000000..bc6fb26
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+#ifndef DESUL_ATOMICS_OPENMP_HPP_
+#define DESUL_ATOMICS_OPENMP_HPP_
+
+#ifdef DESUL_HAVE_OPENMP_ATOMICS
+
+#include <desul/atomics/openmp/OpenMP_40.hpp>
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/SYCL.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/SYCL.hpp
new file mode 100644 (file)
index 0000000..da34564
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+#ifndef DESUL_ATOMICS_SYCL_HPP_
+#define DESUL_ATOMICS_SYCL_HPP_
+
+#ifdef DESUL_HAVE_SYCL_ATOMICS
+
+// clang-format off
+#include "desul/atomics/SYCLConversions.hpp"
+#include "desul/atomics/Common.hpp"
+// clang-format on
+
+namespace desul {
+
+#define DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER(OPER, TYPE)                              \
+  template <class MemoryOrder>                                                     \
+  TYPE atomic_fetch_##OPER(TYPE* dest, TYPE val, MemoryOrder, MemoryScopeDevice) { \
+    Impl::sycl_atomic_ref<TYPE, MemoryOrder, MemoryScopeDevice> dest_ref(*dest);   \
+    return dest_ref.fetch_##OPER(val);                                             \
+  }                                                                                \
+  template <class MemoryOrder>                                                     \
+  TYPE atomic_fetch_##OPER(TYPE* dest, TYPE val, MemoryOrder, MemoryScopeCore) {   \
+    Impl::sycl_atomic_ref<TYPE, MemoryOrder, MemoryScopeCore> dest_ref(*dest);     \
+    return dest_ref.fetch_##OPER(val);                                             \
+  }
+
+#define DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_INTEGRAL(OPER) \
+  DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER(OPER, int)           \
+  DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER(OPER, unsigned int)  \
+  DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER(OPER, long)          \
+  DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER(OPER, unsigned long) \
+  DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER(OPER, long long)     \
+  DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER(OPER, unsigned long long)
+
+#define DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_FLOATING_POINT(OPER) \
+  DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER(OPER, float)               \
+  DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER(OPER, double)
+
+DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_INTEGRAL(add)
+DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_INTEGRAL(sub)
+DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_INTEGRAL(and)
+DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_INTEGRAL(or)
+DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_INTEGRAL(xor)
+DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_INTEGRAL(min)
+DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_INTEGRAL(max)
+
+DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_FLOATING_POINT(add)
+DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_FLOATING_POINT(sub)
+DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_FLOATING_POINT(min)
+DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_FLOATING_POINT(max)
+
+#undef DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_FLOATING_POINT
+#undef DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER_INTEGRAL
+#undef DESUL_IMPL_SYCL_ATOMIC_FETCH_OPER
+
+}  // namespace desul
+
+#endif  // DESUL_HAVE_SYCL_ATOMICS
+#endif  // DESUL_ATOMICS_SYCL_HPP_
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/SYCLConversions.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/SYCLConversions.hpp
new file mode 100644 (file)
index 0000000..7debf91
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_SYCL_CONVERSIONS_HPP_
+#define DESUL_ATOMICS_SYCL_CONVERSIONS_HPP_
+#ifdef DESUL_HAVE_SYCL_ATOMICS
+
+// clang-format off
+#include "desul/atomics/Common.hpp"
+
+#include <CL/sycl.hpp>
+// clang-format on
+
+namespace desul {
+namespace Impl {
+
+#ifdef __clang__
+namespace sycl_sync_and_atomics = ::sycl::ext::oneapi;
+#else
+namespace sycl_sync_and_atomics = ::sycl;
+#endif
+
+template <bool extended_namespace>
+using sycl_memory_order = std::conditional_t<extended_namespace,
+                                             sycl_sync_and_atomics::memory_order,
+                                             sycl::memory_order>;
+template <bool extended_namespace>
+using sycl_memory_scope = std::conditional_t<extended_namespace,
+                                             sycl_sync_and_atomics::memory_scope,
+                                             sycl::memory_scope>;
+
+template <class MemoryOrder, bool extended_namespace = true>
+struct DesulToSYCLMemoryOrder;
+template <bool extended_namespace>
+struct DesulToSYCLMemoryOrder<MemoryOrderSeqCst, extended_namespace> {
+  static constexpr sycl_memory_order<extended_namespace> value =
+      sycl_memory_order<extended_namespace>::seq_cst;
+};
+template <bool extended_namespace>
+struct DesulToSYCLMemoryOrder<MemoryOrderAcquire, extended_namespace> {
+  static constexpr sycl_memory_order<extended_namespace> value =
+      sycl_memory_order<extended_namespace>::acquire;
+};
+template <bool extended_namespace>
+struct DesulToSYCLMemoryOrder<MemoryOrderRelease, extended_namespace> {
+  static constexpr sycl_memory_order<extended_namespace> value =
+      sycl_memory_order<extended_namespace>::release;
+};
+template <bool extended_namespace>
+struct DesulToSYCLMemoryOrder<MemoryOrderAcqRel, extended_namespace> {
+  static constexpr sycl_memory_order<extended_namespace> value =
+      sycl_memory_order<extended_namespace>::acq_rel;
+};
+template <bool extended_namespace>
+struct DesulToSYCLMemoryOrder<MemoryOrderRelaxed, extended_namespace> {
+  static constexpr sycl_memory_order<extended_namespace> value =
+      sycl_memory_order<extended_namespace>::relaxed;
+};
+
+template <class MemoryScope, bool extended_namespace = true>
+struct DesulToSYCLMemoryScope;
+template <bool extended_namespace>
+struct DesulToSYCLMemoryScope<MemoryScopeCore, extended_namespace> {
+  static constexpr sycl_memory_scope<extended_namespace> value =
+      sycl_memory_scope<extended_namespace>::work_group;
+};
+template <bool extended_namespace>
+struct DesulToSYCLMemoryScope<MemoryScopeDevice, extended_namespace> {
+  static constexpr sycl_memory_scope<extended_namespace> value =
+      sycl_memory_scope<extended_namespace>::device;
+};
+template <bool extended_namespace>
+struct DesulToSYCLMemoryScope<MemoryScopeSystem, extended_namespace> {
+  static constexpr sycl_memory_scope<extended_namespace> value =
+      sycl_memory_scope<extended_namespace>::system;
+};
+
+// FIXME_SYCL generic_space isn't available yet for CUDA.
+#ifdef __NVPTX__
+template <class T, class MemoryOrder, class MemoryScope>
+using sycl_atomic_ref = sycl::atomic_ref<T,
+                                         DesulToSYCLMemoryOrder<MemoryOrder>::value,
+                                         DesulToSYCLMemoryScope<MemoryScope>::value,
+                                         sycl::access::address_space::global_space>;
+#else
+template <class T, class MemoryOrder, class MemoryScope>
+using sycl_atomic_ref = sycl::atomic_ref<T,
+                                         DesulToSYCLMemoryOrder<MemoryOrder>::value,
+                                         DesulToSYCLMemoryScope<MemoryScope>::value,
+                                         sycl::access::address_space::generic_space>;
+#endif
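+
+// Illustrative sketch, not part of the original source: the alias lets the
+// SYCL backend spell a desul (order, scope) pair as a sycl::atomic_ref, e.g.
+// for a relaxed, device-scope increment of a hypothetical variable x:
+//
+//   sycl_atomic_ref<int, MemoryOrderRelaxed, MemoryScopeDevice> ref(x);
+//   ref.fetch_add(1);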
+}  // namespace Impl
+}  // namespace desul
+
+#endif
+#endif
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/CUDA_asm.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/CUDA_asm.hpp
new file mode 100644 (file)
index 0000000..461d3e0
--- /dev/null
@@ -0,0 +1,18 @@
+#include <limits>
+namespace desul {
+#if defined(__CUDA_ARCH__)  || (defined(__clang__) && !defined(__NVCC__))
+// Choose the variant of atomics we are using later
+#if !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_GENERIC) && \
+    !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE) && \
+    !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL) && \
+    !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_FORCEGLOBAL)
+#if (__CUDACC_VER_MAJOR__ > 11) || ((__CUDACC_VER_MAJOR__==11) && (__CUDACC_VER_MINOR__>1))
+#define DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL
+#else
+#define DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE
+#endif
+#endif
+#include <desul/atomics/cuda/cuda_cc7_asm.inc>
+
+#endif
+}
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/CUDA_asm_exchange.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/CUDA_asm_exchange.hpp
new file mode 100644 (file)
index 0000000..0ab95e6
--- /dev/null
@@ -0,0 +1,8 @@
+#include <limits>
+namespace desul {
+#if defined(__CUDA_ARCH__)  || (defined(__clang__) && !defined(__NVCC__))
+
+#include <desul/atomics/cuda/cuda_cc7_asm_exchange.inc>
+
+#endif
+}
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm.inc b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm.inc
new file mode 100644 (file)
index 0000000..2bc64a7
--- /dev/null
@@ -0,0 +1,20 @@
+
+// Non-returning atomic operations (the PTX red instruction) exist only for the
+// relaxed and release memory orders
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE MemoryScopeDevice
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".gpu"
+#include "desul/atomics/cuda/cuda_cc7_asm_memorder.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM
+
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE MemoryScopeNode
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".sys"
+#include "desul/atomics/cuda/cuda_cc7_asm_memorder.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM
+
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE MemoryScopeCore
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".cta"
+#include "desul/atomics/cuda/cuda_cc7_asm_memorder.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM
+
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc
new file mode 100644 (file)
index 0000000..6de590a
--- /dev/null
@@ -0,0 +1,18 @@
+#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_GENERIC
+#include "cuda_cc7_asm_atomic_fetch_op.inc_generic"
+#endif
+
+#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL
+#include "cuda_cc7_asm_atomic_fetch_op.inc_isglobal"
+#endif
+
+#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE
+#include "cuda_cc7_asm_atomic_fetch_op.inc_predicate"
+#endif
+
+// This version is not generally safe
+// Only here for performance comparison purposes
+#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_FORCEGLOBAL
+#include "cuda_cc7_asm_atomic_fetch_op.inc_forceglobal"
+#endif
+
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc_forceglobal b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc_forceglobal
new file mode 100644 (file)
index 0000000..b235163
--- /dev/null
@@ -0,0 +1,153 @@
+
+// Inline PTX register constraints: h = u16, r = u32, l = u64, f = f32, d = f64
+
+// binary operations
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  asm volatile("atom.and.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  asm volatile("atom.and.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  asm volatile("atom.or.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  asm volatile("atom.or.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  asm volatile("atom.xor.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  asm volatile("atom.xor.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+// Fetch atomics
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_add(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  asm volatile("atom.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_sub(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  ctype neg_value = -value; \
+  asm volatile("atom.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(neg_value) : "memory"); \
+  return result; \
+}
+
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_min(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  asm volatile("atom.min.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_max(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  asm volatile("atom.max.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_inc(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
+  asm volatile("atom.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  return result; \
+} \
+inline __device__ ctype atomic_fetch_inc_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  asm volatile("atom.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_dec(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
+  asm volatile("atom.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  return result; \
+} \
+inline __device__ ctype atomic_fetch_dec_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  asm volatile("atom.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  return result; \
+}
+
+// Group ops for integer ctypes
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP() \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR()
+
+
+// Instantiate Functions
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(float,".f32","f","=f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(float,".f32","f","=f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(double,".f64","d","=d")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(double,".f64","d","=d")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int32_t,".s32","r","=r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int64_t,".s64","l","=l")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(uint32_t,".u32","r","=r")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP()
+
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP
+
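To decode the macro argument tuple (ctype, asm_ctype, reg_ctype, reg_ret_ctype), it helps to write out one instantiation by hand. Here is a sketch of what __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(float,".f32","f","=f") expands to in the flavor above, which unconditionally appends .global; the memory-order/scope parameter types and the *_ASM qualifier strings are defined by the header that includes this file, so they are left symbolic here:

inline __device__ float atomic_fetch_add(float* dest, float value,
                                         __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER,
                                         __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) {
  float result = 0;
  // atom.add returns the previous value; the .global qualifier asserts that
  // dest lives in the global state space, which is why this "forceglobal"
  // flavor is unsafe for shared-memory pointers.
  asm volatile("atom.add.global"
               __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM
               __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM
               ".f32"
               " %0,[%1],%2;"
               : "=f"(result)
               : "l"(dest), "f"(value)
               : "memory");
  return result;
}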
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc_generic b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc_generic
new file mode 100644 (file)
index 0000000..0484d10
--- /dev/null
@@ -0,0 +1,151 @@
+
+// Inline PTX register constraint letters: h = u16, r = u32, l = u64, f = f32, d = f64
+// Ops: and, or, xor, add, sub (as add of the negated value), min, max, inc, dec
+
+// Binary operations (and, or, xor)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  asm volatile("atom.and" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  asm volatile("atom.and" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  asm volatile("atom.or" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  asm volatile("atom.or" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  asm volatile("atom.xor" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  asm volatile("atom.xor" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+// Fetch atomics
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_add(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  asm volatile("atom.add" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_sub(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  ctype neg_value = -value; \
+  asm volatile("atom.add" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(neg_value) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_min(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  asm volatile("atom.min" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_max(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  asm volatile("atom.max" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_inc(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
+  asm volatile("atom.inc" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  return result; \
+} \
+inline __device__ ctype atomic_fetch_inc_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  asm volatile("atom.inc" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_dec(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
+  asm volatile("atom.dec" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  return result; \
+} \
+inline __device__ ctype atomic_fetch_dec_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  asm volatile("atom.dec" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  return result; \
+}
+// Group ops for integer ctypes
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP() \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR()
+
+
+// Instantiate Functions
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(float,".f32","f","=f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(float,".f32","f","=f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(double,".f64","d","=d")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(double,".f64","d","=d")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int32_t,".s32","r","=r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int64_t,".s64","l","=l")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(uint32_t,".u32","r","=r")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP()
+
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP
+
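The atomic_fetch_inc/atomic_fetch_dec pairs rely on the wrap-around semantics of PTX atom.inc and atom.dec. A plain sequential sketch of those semantics as documented in the PTX ISA; the function names are illustrative, and the real instructions perform this read-modify-write atomically:

#include <cstdint>

// Reference semantics of atom.inc: wrap to 0 once the limit is reached.
uint32_t fetch_inc_mod_ref(uint32_t& x, uint32_t limit) {
  uint32_t old = x;
  x = (old >= limit) ? 0u : old + 1u;
  return old;
}

// Reference semantics of atom.dec: wrap to the limit below 0.
uint32_t fetch_dec_mod_ref(uint32_t& x, uint32_t limit) {
  uint32_t old = x;
  x = (old == 0u || old > limit) ? limit : old - 1u;
  return old;
}

Passing desul::Impl::numeric_limits_max&lt;ctype&gt;::value as the limit, as atomic_fetch_inc and atomic_fetch_dec do above, turns these into ordinary wrap-around increment and decrement.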
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc_isglobal b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc_isglobal
new file mode 100644 (file)
index 0000000..3d077ae
--- /dev/null
@@ -0,0 +1,208 @@
+
+// Inline PTX register constraint letters: h = u16, r = u32, l = u64, f = f32, d = f64
+// Ops: and, or, xor, add, sub (as add of the negated value), min, max, inc, dec
+
+// Binary operations (and, or, xor)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.and.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  } else { \
+  asm volatile("atom.and"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  } \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.and.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  } else { \
+  asm volatile("atom.and"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  } \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.or.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  } else { \
+  asm volatile("atom.or"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  } \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.or.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  } else { \
+  asm volatile("atom.or"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  } \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.xor.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  } else { \
+  asm volatile("atom.xor"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  } \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.xor.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  } else { \
+  asm volatile("atom.xor"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  } \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+// Fetch atomics
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_add(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  } else { \
+  asm volatile("atom.add" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  } \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_sub(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  ctype neg_value = -value; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(neg_value) : "memory"); \
+  } else { \
+  asm volatile("atom.add" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(neg_value) : "memory"); \
+  } \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_min(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.min.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  } else { \
+  asm volatile("atom.min"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  } \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_max(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.max.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  } else { \
+  asm volatile("atom.max"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  } \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_inc(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  } else { \
+  asm volatile("atom.inc"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  } \
+  return result; \
+} \
+inline __device__ ctype atomic_fetch_inc_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  } else { \
+  asm volatile("atom.inc"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  } \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_dec(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  } else { \
+  asm volatile("atom.dec"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  } \
+  return result; \
+} \
+inline __device__ ctype atomic_fetch_dec_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  if(__isGlobal(dest)) { \
+  asm volatile("atom.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  } else { \
+  asm volatile("atom.dec"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;" : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  } \
+  return result; \
+}
+
+// Group ops for integer ctypes
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP() \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR()
+
+
+// Instantiate Functions
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(float,".f32","f","=f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(float,".f32","f","=f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(double,".f64","d","=d")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(double,".f64","d","=d")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int32_t,".s32","r","=r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int64_t,".s64","l","=l")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(uint32_t,".u32","r","=r")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP()
+
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP
+
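The isglobal flavor branches on __isGlobal(), a CUDA device intrinsic that returns nonzero when a generic pointer maps into the global state space; only then can the .global-qualified instruction be issued safely. A minimal sketch isolating that test (the enum and function names are illustrative stand-ins, not part of desul):

// __isGlobal() is the CUDA device intrinsic used in the macros above;
// everything else in this snippet is illustrative only.
enum class AtomFlavor { Global, Generic };

__device__ AtomFlavor classify(const void* p) {
  return __isGlobal(p) ? AtomFlavor::Global    // may use atom.*.global
                       : AtomFlavor::Generic;  // must use generic atom.*
}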
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc_predicate b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc_predicate
new file mode 100644 (file)
index 0000000..4039448
--- /dev/null
@@ -0,0 +1,250 @@
+
+// Inline PTX register constraint letters: h = u16, r = u32, l = u64, f = f32, d = f64
+// Ops: and, or, xor, add, sub (as add of the negated value), min, max, inc, dec
+
+// Binary operations (and, or, xor)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.and.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;\n\t" \
+          "@!p atom.and"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_and(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.and.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;\n\t" \
+          "@!p atom.and"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.or.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;\n\t" \
+          "@!p atom.or"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_or(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.or.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;\n\t" \
+          "@!p atom.or"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.xor.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;\n\t" \
+          "@!p atom.xor"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_fetch_xor(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.xor.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;\n\t" \
+          "@!p atom.xor"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+// Fetch atomics
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_add(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "@!p atom.add"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_sub(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  ctype neg_value = -value; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "@!p atom.add"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : reg_ret_ctype(result) : "l"(dest),reg_ctype(neg_value) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_min(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.min.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "@!p atom.min"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_max(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result=0; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.max.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "@!p atom.max"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : reg_ret_ctype(result) : "l"(dest),reg_ctype(value) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_inc(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "@!p atom.inc"       __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  return result; \
+} \
+inline __device__ ctype atomic_fetch_inc_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "@!p atom.inc"       __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  return result; \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+inline __device__ ctype atomic_fetch_dec(ctype* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  ctype limit = desul::Impl::numeric_limits_max<ctype>::value; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "@!p atom.dec"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  return result; \
+} \
+inline __device__ ctype atomic_fetch_dec_mod(ctype* dest, ctype limit, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  ctype result = 0; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %1;\n\t" \
+          "@p  atom.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "@!p atom.dec"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_ctype " %0,[%1],%2;\n\t" \
+          "}\n\t" \
+    : reg_ret_ctype(result) : "l"(dest),reg_ctype(limit) : "memory"); \
+  return result; \
+}
+
+// Group ops for integer ctypes
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN(ctype,asm_ctype,reg_ctype,reg_ret_ctype) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX(ctype,asm_ctype,reg_ctype,reg_ret_ctype)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP() \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND() \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR() \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR()
+
+
+// Instantiate Functions
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(float,".f32","f","=f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(float,".f32","f","=f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD(double,".f64","d","=d")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB(double,".f64","d","=d")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP(uint64_t,".u64","l","=l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int32_t,".s32","r","=r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP(int64_t,".s64","l","=l")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC(uint32_t,".u32","r","=r")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC(uint32_t,".u32","r","=r")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP()
+
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_ADD
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_SUB
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MIN
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_MAX
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INC
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_DEC
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_AND
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_OR
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_XOR
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_INTEGER_OP
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_UNSIGNED_OP
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_FETCH_BIN_OP
+
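The predicate flavor performs the same state-space test, but entirely inside one asm statement: isspacep.global is the PTX analogue of __isGlobal(), and the @p/@!p guards predicate the two atom forms so no C++ branch is emitted. A sketch of the float instantiation of the add macro, with the order/scope macros again left symbolic (they come from the including header):

inline __device__ float atomic_fetch_add(float* dest, float value,
                                         __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER,
                                         __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) {
  float result = 0;
  asm volatile(
      "{\n\t"
      ".reg .pred p;\n\t"           // declare predicate register p
      "isspacep.global p, %1;\n\t"  // p := (dest maps into global space)
      "@p  atom.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM
          __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".f32 %0,[%1],%2;\n\t"
      "@!p atom.add" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM
          __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".f32 %0,[%1],%2;\n\t"
      "}\n\t"
      : "=f"(result) : "l"(dest), "f"(value) : "memory");
  return result;
}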
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc
new file mode 100644 (file)
index 0000000..ca02410
--- /dev/null
@@ -0,0 +1,18 @@
+#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_GENERIC
+#include "cuda_cc7_asm_atomic_op.inc_generic"
+#endif
+
+#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL
+#include "cuda_cc7_asm_atomic_op.inc_isglobal"
+#endif
+
+#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE
+#include "cuda_cc7_asm_atomic_op.inc_predicate"
+#endif
+
+// This version is not generally safe and is included only for
+// performance comparisons.
+#ifdef DESUL_IMPL_ATOMIC_CUDA_PTX_FORCEGLOBAL
+#include "cuda_cc7_asm_atomic_op.inc_forceglobal"
+#endif
+
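Exactly one of the four DESUL_IMPL_ATOMIC_CUDA_PTX_* selectors is expected to be defined by the build; if none is, this header contributes no definitions at all. A guard one could place at the top to make a missing selection fail loudly (not present upstream, purely illustrative):

#if !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_GENERIC) && \
    !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_ISGLOBAL) && \
    !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_PREDICATE) && \
    !defined(DESUL_IMPL_ATOMIC_CUDA_PTX_FORCEGLOBAL)
#error "Define one DESUL_IMPL_ATOMIC_CUDA_PTX_* variant before including this header."
#endif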
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc_forceglobal b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc_forceglobal
new file mode 100644 (file)
index 0000000..3767b2a
--- /dev/null
@@ -0,0 +1,64 @@
+
+// Inline PTX register constraint letters: h = u16, r = u32, l = u64, f = f32, d = f64
+// Ops: add, sub (as add of the negated value), min, max, inc, dec
+
+// Non-returning atomic operations (PTX red.*)
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
+inline __device__ void atomic_add(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  asm volatile("red.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
+inline __device__ void atomic_sub(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  type neg_value = -value; \
+  asm volatile("red.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(neg_value) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
+inline __device__ void atomic_min(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  asm volatile("red.min.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
+inline __device__ void atomic_max(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  asm volatile("red.max.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
+inline __device__ void atomic_inc(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  type limit = desul::Impl::numeric_limits_max<type>::value; \
+  asm volatile("red.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type) \
+inline __device__ void atomic_dec(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  type limit = desul::Impl::numeric_limits_max<type>::value; \
+  asm volatile("red.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
+}
+
+// Group ops for integer types
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type)
+
+// Instantiate Functions
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(float,".f32","f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(float,".f32","f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(double,".f64","d")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(double,".f64","d")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(uint32_t,".u32","r")
+
+// int64_t reuses the unsigned .u64 instructions: add/sub are bit-identical in
+// two's complement, but min/max then compare as unsigned (the signed .s64
+// instantiation below is disabled).
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int32_t,".s32","r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".s64","l")
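Unlike the atomic_fetch_* headers above, these *_op headers emit PTX red.* reduction instructions, which do not return the old value: the generated atomic_add/sub/min/max/inc/dec are void and need no output operand, so the hardware can fire and forget. A sketch of what __DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(float,".f32","f") expands to in this flavor (order/scope macros supplied by the including header):

inline __device__ void atomic_add(float* dest, float value,
                                  __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER,
                                  __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) {
  // red.add is a reduction without read-back, hence no "=..." output
  // constraint and no return value.
  asm volatile("red.add.global"
               __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM
               __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM
               ".f32"
               " [%0],%1;"
               :: "l"(dest), "f"(value)
               : "memory");
}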
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc_generic b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc_generic
new file mode 100644 (file)
index 0000000..5de36a3
--- /dev/null
@@ -0,0 +1,64 @@
+
+// Inline PTX register constraint letters: h = u16, r = u32, l = u64, f = f32, d = f64
+// Ops: add, sub (as add of the negated value), min, max, inc, dec
+
+// Non-returning atomic operations (PTX red.*)
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
+inline __device__ void atomic_add(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  asm volatile("red.add" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
+inline __device__ void atomic_sub(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  type neg_value = -value; \
+  asm volatile("red.add" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(neg_value) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
+inline __device__ void atomic_min(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  asm volatile("red.min" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
+inline __device__ void atomic_max(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  asm volatile("red.max" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
+inline __device__ void atomic_inc(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  type limit = desul::Impl::numeric_limits_max<type>::value; \
+  asm volatile("red.inc" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type) \
+inline __device__ void atomic_dec(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  type limit = desul::Impl::numeric_limits_max<type>::value; \
+  asm volatile("red.dec" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
+}
+
+// Group ops for integer types
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type)
+
+// Instantiate Functions
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(float,".f32","f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(float,".f32","f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(double,".f64","d")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(double,".f64","d")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(uint32_t,".u32","r")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int32_t,".s32","r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".s64","l")
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc_isglobal b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc_isglobal
new file mode 100644 (file)
index 0000000..ba89378
--- /dev/null
@@ -0,0 +1,88 @@
+
+// Inline PTX register constraint letters: h = u16, r = u32, l = u64, f = f32, d = f64
+// Ops: add, sub (as add of the negated value), min, max, inc, dec
+
+// Non-returning atomic operations (PTX red.*)
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
+inline __device__ void atomic_add(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  if(__isGlobal(dest)) { \
+  asm volatile("red.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
+  } else { \
+  asm volatile("red.add"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
+  } \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
+inline __device__ void atomic_sub(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  type neg_value = -value; \
+  if(__isGlobal(dest)) { \
+  asm volatile("red.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(neg_value) : "memory"); \
+  } else { \
+  asm volatile("red.add"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(neg_value) : "memory"); \
+  } \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
+inline __device__ void atomic_min(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  if(__isGlobal(dest)) { \
+  asm volatile("red.min.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
+  } else { \
+  asm volatile("red.min"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
+  } \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
+inline __device__ void atomic_max(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  if(__isGlobal(dest)) { \
+  asm volatile("red.max.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
+  } else { \
+  asm volatile("red.max"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(value) : "memory"); \
+  } \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
+inline __device__ void atomic_inc(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  type limit = desul::Impl::numeric_limits_max<type>::value; \
+  if(__isGlobal(dest)) { \
+  asm volatile("red.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
+  } else { \
+  asm volatile("red.inc"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
+  } \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type) \
+inline __device__ void atomic_dec(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  type limit = desul::Impl::numeric_limits_max<type>::value; \
+  if(__isGlobal(dest)) { \
+  asm volatile("red.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
+  } else { \
+  asm volatile("red.dec"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;" :: "l"(dest),reg_type(limit) : "memory"); \
+  } \
+}
+
+// Group ops for integer types
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type)
+
+// Instantiate Functions
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(float,".f32","f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(float,".f32","f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(double,".f64","d")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(double,".f64","d")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(uint32_t,".u32","r")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int32_t,".s32","r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".s64","l")
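For context, a hypothetical kernel showing how user code typically reaches these building blocks through desul's public tag-dispatch entry points; this assumes the usual desul/atomics.hpp umbrella header and the MemoryOrderRelaxed/MemoryScopeDevice tag types, so treat the exact spellings as assumptions rather than part of this diff:

#include <desul/atomics.hpp>

// Hypothetical usage sketch: every thread accumulates one element into *sum.
__global__ void accumulate(float* sum, const float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    desul::atomic_add(sum, x[i], desul::MemoryOrderRelaxed(),
                      desul::MemoryScopeDevice());
}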
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc_predicate b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc_predicate
new file mode 100644 (file)
index 0000000..46e0ccf
--- /dev/null
@@ -0,0 +1,106 @@
+
+// Inline PTX register constraint letters: h = u16, r = u32, l = u64, f = f32, d = f64
+// Ops: add, sub (as add of the negated value), min, max, inc, dec
+
+// Non-returning atomic operations (PTX red.*)
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
+inline __device__ void atomic_add(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %0;\n\t" \
+          "@p  red.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;\n\t" \
+          "@!p red.add"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;\n\t" \
+          "}\n\t" \
+    :: "l"(dest),reg_type(value) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
+inline __device__ void atomic_sub(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  type neg_value = -value; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %0;\n\t" \
+          "@p  red.add.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;\n\t" \
+          "@!p red.add"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;\n\t" \
+          "}\n\t" \
+    :: "l"(dest),reg_type(neg_value) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
+inline __device__ void atomic_min(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %0;\n\t" \
+          "@p  red.min.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;\n\t" \
+          "@!p red.min"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;\n\t" \
+          "}\n\t" \
+    :: "l"(dest),reg_type(value) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
+inline __device__ void atomic_max(type* dest, type value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %0;\n\t" \
+          "@p  red.max.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;\n\t" \
+          "@!p red.max"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;\n\t" \
+          "}\n\t" \
+    :: "l"(dest),reg_type(value) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
+inline __device__ void atomic_inc(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  type limit = desul::Impl::numeric_limits_max<type>::value; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %0;\n\t" \
+          "@p  red.inc.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;\n\t" \
+          "@!p red.inc"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;\n\t" \
+          "}\n\t" \
+    :: "l"(dest),reg_type(limit) : "memory"); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type) \
+inline __device__ void atomic_dec(type* dest, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  type limit = desul::Impl::numeric_limits_max<type>::value; \
+  asm volatile( \
+          "{\n\t" \
+          ".reg .pred p;\n\t" \
+          "isspacep.global p, %0;\n\t" \
+          "@p  red.dec.global" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;\n\t" \
+          "@!p red.dec"        __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM asm_type " [%0],%1;\n\t" \
+          "}\n\t" \
+    :: "l"(dest),reg_type(limit) : "memory"); \
+}
+
+// Group ops for integer types
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type)
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MIN(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_MAX(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INC(type,asm_type,reg_type) \
+__DESUL_IMPL_CUDA_ASM_ATOMIC_DEC(type,asm_type,reg_type)
+
+// Instantiate Functions
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(float,".f32","f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(float,".f32","f")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(double,".f64","d")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_SUB(double,".f64","d")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_UNSIGNED_OP(uint32_t,".u32","r")
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".u64","l")
+__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int32_t,".s32","r")
+//__DESUL_IMPL_CUDA_ASM_ATOMIC_INTEGER_OP(int64_t,".s64","l")
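
To make the predicated variant concrete, this is roughly what __DESUL_IMPL_CUDA_ASM_ATOMIC_ADD(float,".f32","f") produces for the relaxed-order/device-scope inclusion; a hand expansion for illustration, not code from the commit:

    // The PTX predicate p records whether dest is in the global address space;
    // exactly one of the two predicated red.add instructions executes, so the
    // generic (non-.global) form is only used for non-global pointers.
    inline __device__ void atomic_add(float* dest, float value,
                                      MemoryOrderRelaxed, MemoryScopeDevice) {
      asm volatile(
          "{\n\t"
          ".reg .pred p;\n\t"
          "isspacep.global p, %0;\n\t"
          "@p  red.add.global.relaxed.gpu.f32 [%0],%1;\n\t"
          "@!p red.add.relaxed.gpu.f32 [%0],%1;\n\t"
          "}\n\t"
          :: "l"(dest), "f"(value) : "memory");
    }
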
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_exchange.inc b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_exchange.inc
new file mode 100644 (file)
index 0000000..dfd2112
--- /dev/null
@@ -0,0 +1,20 @@
+
+// Dispatch over the memory scopes (.gpu, .sys, .cta) for the exchange operations
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE MemoryScopeDevice
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".gpu"
+#include "desul/atomics/cuda/cuda_cc7_asm_exchange_memorder.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM
+
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE MemoryScopeNode
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".sys"
+#include "desul/atomics/cuda/cuda_cc7_asm_exchange_memorder.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM
+
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE MemoryScopeCore
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".cta"
+#include "desul/atomics/cuda/cuda_cc7_asm_exchange_memorder.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM
+
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_exchange_memorder.inc b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_exchange_memorder.inc
new file mode 100644 (file)
index 0000000..7b4f7d0
--- /dev/null
@@ -0,0 +1,27 @@
+
+// Dispatch over the memory orders for the exchange operations; atom.exch and atom.cas support all four
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER MemoryOrderRelaxed
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM ".relaxed"
+#include "desul/atomics/cuda/cuda_cc7_asm_exchange_op.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM
+
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER MemoryOrderRelease
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM ".release"
+#include "desul/atomics/cuda/cuda_cc7_asm_exchange_op.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM
+
+
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER MemoryOrderAcquire
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM ".acquire"
+#include "desul/atomics/cuda/cuda_cc7_asm_exchange_op.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM
+
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER MemoryOrderAcqRel
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM ".acq_rel"
+#include "desul/atomics/cuda/cuda_cc7_asm_exchange_op.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM
+
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_exchange_op.inc b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_exchange_op.inc
new file mode 100644 (file)
index 0000000..51d9920
--- /dev/null
@@ -0,0 +1,40 @@
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_EXCHANGE() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_exchange(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_result = 0u; \
+  asm volatile("atom.exch" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2;" : "=r"(asm_result) : "l"(dest),"r"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_exchange(ctype* dest, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_result = 0u; \
+  asm volatile("atom.exch" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2;" : "=l"(asm_result) : "l"(dest),"l"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+#define __DESUL_IMPL_CUDA_ASM_ATOMIC_COMPARE_EXCHANGE() \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==4, ctype>::type atomic_compare_exchange(ctype* dest, ctype compare, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint32_t asm_value = reinterpret_cast<uint32_t&>(value); \
+  uint32_t asm_compare = reinterpret_cast<uint32_t&>(compare); \
+  uint32_t asm_result = 0u; \
+  asm volatile("atom.cas" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b32" " %0,[%1],%2,%3;" : "=r"(asm_result) : "l"(dest),"r"(asm_compare),"r"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+} \
+template<class ctype> \
+inline __device__ typename std::enable_if<sizeof(ctype)==8, ctype>::type atomic_compare_exchange(ctype* dest, ctype compare, ctype value, __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER, __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE) { \
+  uint64_t asm_value = reinterpret_cast<uint64_t&>(value); \
+  uint64_t asm_compare = reinterpret_cast<uint64_t&>(compare); \
+  uint64_t asm_result = 0u; \
+  asm volatile("atom.cas" __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM __DESUL_IMPL_CUDA_ASM_MEMORY_SCOPE_ASM ".b64" " %0,[%1],%2,%3;" : "=l"(asm_result) : "l"(dest),"l"(asm_compare),"l"(asm_value) : "memory"); \
+  return reinterpret_cast<ctype&>(asm_result); \
+}
+
+__DESUL_IMPL_CUDA_ASM_ATOMIC_EXCHANGE()
+__DESUL_IMPL_CUDA_ASM_ATOMIC_COMPARE_EXCHANGE()
+
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_EXCHANGE
+#undef __DESUL_IMPL_CUDA_ASM_ATOMIC_COMPARE_EXCHANGE
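
Only exchange and compare-exchange are emitted in this file; other read-modify-write operations can be layered on top with the usual CAS loop. A sketch under the assumption that the atomic_compare_exchange overloads above are in scope (atomic_max_via_cas is an illustrative helper, not part of desul):

    // Atomic max for double built on the 8-byte atom.cas wrapper above.
    // The loop retries until either *dest is already >= value or our CAS wins.
    inline __device__ double atomic_max_via_cas(double* dest, double value) {
      double expected = *dest;
      while (expected < value) {
        double old = atomic_compare_exchange(
            dest, expected, value, MemoryOrderRelaxed(), MemoryScopeDevice());
        if (old == expected) break;  // CAS succeeded, value is stored
        expected = old;              // another thread intervened; retry
      }
      return expected;
    }
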
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_memorder.inc b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/cuda/cuda_cc7_asm_memorder.inc
new file mode 100644 (file)
index 0000000..3eb613d
--- /dev/null
@@ -0,0 +1,29 @@
+
+// Non-returning atomic operations (the PTX red instruction) only exist for the relaxed
+// and release memory orders, so cuda_cc7_asm_atomic_op.inc is included only for those two.
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER MemoryOrderRelaxed
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM ".relaxed"
+#include "desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc"
+#include "desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM
+
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER MemoryOrderRelease
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM ".release"
+#include "desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc"
+#include "desul/atomics/cuda/cuda_cc7_asm_atomic_op.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM
+
+
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER MemoryOrderAcquire
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM ".acquire"
+#include "desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM
+
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER MemoryOrderAcqRel
+#define __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM ".acq_rel"
+#include "desul/atomics/cuda/cuda_cc7_asm_atomic_fetch_op.inc"
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER
+#undef __DESUL_IMPL_CUDA_ASM_MEMORY_ORDER_ASM
+
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/openmp/OpenMP_40.hpp b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/openmp/OpenMP_40.hpp
new file mode 100644 (file)
index 0000000..f4f1bbd
--- /dev/null
@@ -0,0 +1,97 @@
+/* 
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#ifndef DESUL_ATOMICS_OPENMP40_HPP_
+#define DESUL_ATOMICS_OPENMP40_HPP_
+#include<type_traits>
+
+namespace desul {
+namespace Impl {
+  template<class MEMORY_ORDER_TMP, class MEMORY_SCOPE_TMP>
+  void openmp_maybe_call_pre_capture_flush(MEMORY_ORDER_TMP, MEMORY_SCOPE_TMP) {}
+  template<class MEMORY_SCOPE_TMP>
+  void openmp_maybe_call_pre_capture_flush(MemoryOrderAcquire, MEMORY_SCOPE_TMP) {
+    atomic_thread_fence(MemoryOrderAcquire(), MEMORY_SCOPE_TMP());
+  }
+  template<class MEMORY_SCOPE_TMP>
+  void openmp_maybe_call_pre_capture_flush(MemoryOrderAcqRel, MEMORY_SCOPE_TMP) {
+    atomic_thread_fence(MemoryOrderAcqRel(), MEMORY_SCOPE_TMP());
+  }
+  template<class MEMORY_SCOPE_TMP>
+  void openmp_maybe_call_pre_capture_flush(MemoryOrderSeqCst, MEMORY_SCOPE_TMP) {
+    atomic_thread_fence(MemoryOrderSeqCst(), MEMORY_SCOPE_TMP());
+  }
+
+  template<class MEMORY_ORDER_TMP, class MEMORY_SCOPE_TMP>
+  void openmp_maybe_call_post_capture_flush(MEMORY_ORDER_TMP, MEMORY_SCOPE_TMP) {}
+  template<class MEMORY_SCOPE_TMP>
+  void openmp_maybe_call_post_capture_flush(MemoryOrderRelease, MEMORY_SCOPE_TMP) {
+    atomic_thread_fence(MemoryOrderRelease(), MEMORY_SCOPE_TMP());
+  }
+  template<class MEMORY_SCOPE_TMP>
+  void openmp_maybe_call_post_capture_flush(MemoryOrderAcqRel, MEMORY_SCOPE_TMP) {
+    atomic_thread_fence(MemoryOrderAcqRel(), MEMORY_SCOPE_TMP());
+  }
+  template<class MEMORY_SCOPE_TMP>
+  void openmp_maybe_call_post_capture_flush(MemoryOrderSeqCst, MEMORY_SCOPE_TMP) {
+    atomic_thread_fence(MemoryOrderSeqCst(), MEMORY_SCOPE_TMP());
+  }
+
+  template<class T>
+  struct is_openmp_atomic_type_t {
+    static constexpr bool value = std::is_arithmetic<T>::value;
+  };
+  template<class T>
+  constexpr bool is_openmp_atomic_type_v = is_openmp_atomic_type_t<T>::value;
+}
+}
+
+namespace desul {
+// We can't use a macro approach to get all definitions since the ops contain "#pragma omp",
+// so we resort to multiple inclusion of the same code snippet instead.
+
+// Node-level atomics can't be done this way with OpenMP Target, but we could
+// have a define that says whether or not the device level IS the node level (e.g., for a pure CPU node).
+
+#define MEMORY_ORDER MemoryOrderRelaxed
+// #define MEMORY_SCOPE MemoryScopeNode
+// #include<desul/atomics/openmp/OpenMP_40_op.inc>
+// #undef MEMORY_SCOPE
+#define MEMORY_SCOPE MemoryScopeDevice
+#include<desul/atomics/openmp/OpenMP_40_op.inc>
+#undef MEMORY_SCOPE
+#define MEMORY_SCOPE MemoryScopeCore
+#include<desul/atomics/openmp/OpenMP_40_op.inc>
+#undef MEMORY_SCOPE
+#undef MEMORY_ORDER
+
+#define MEMORY_ORDER MemoryOrderAcqRel
+// #define MEMORY_SCOPE MemoryScopeNode
+// #include<desul/atomics/openmp/OpenMP_40_op.inc>
+// #undef MEMORY_SCOPE
+#define MEMORY_SCOPE MemoryScopeDevice
+#include<desul/atomics/openmp/OpenMP_40_op.inc>
+#undef MEMORY_SCOPE
+#define MEMORY_SCOPE MemoryScopeCore
+#include<desul/atomics/openmp/OpenMP_40_op.inc>
+#undef MEMORY_SCOPE
+#undef MEMORY_ORDER
+
+#define MEMORY_ORDER MemoryOrderSeqCst
+// #define MEMORY_SCOPE MemoryScopeNode
+// #include<desul/atomics/openmp/OpenMP_40_op.inc>
+// #undef MEMORY_SCOPE
+#define MEMORY_SCOPE MemoryScopeDevice
+#include<desul/atomics/openmp/OpenMP_40_op.inc>
+#undef MEMORY_SCOPE
+#define MEMORY_SCOPE MemoryScopeCore
+#include<desul/atomics/openmp/OpenMP_40_op.inc>
+#undef MEMORY_SCOPE
+#undef MEMORY_ORDER
+}  // namespace desul
+#endif
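
Because the per-operation bodies contain a #pragma, they cannot be generated by a macro; the repeated #define/#include/#undef blocks above stamp out one overload set per (memory order, memory scope) pair instead. For instance, the MemoryOrderRelaxed/MemoryScopeDevice inclusion of OpenMP_40_op.inc (shown next) effectively produces, after expansion:

    // Hand expansion for illustration. With relaxed ordering, both flush
    // helpers resolve to the empty generic overloads, so only the capture runs.
    template <typename T>
    std::enable_if_t<Impl::is_openmp_atomic_type_v<T>, T> atomic_fetch_add(
        T* const dest, T value, MemoryOrderRelaxed, MemoryScopeDevice) {
      T tmp;
      Impl::openmp_maybe_call_pre_capture_flush(MemoryOrderRelaxed(), MemoryScopeDevice());
    #pragma omp atomic capture
      { tmp = *dest; *dest += value; }
      Impl::openmp_maybe_call_post_capture_flush(MemoryOrderRelaxed(), MemoryScopeDevice());
      return tmp;
    }
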
diff --git a/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/openmp/OpenMP_40_op.inc b/bundled/kokkos-3.7.00/tpls/desul/include/desul/atomics/openmp/OpenMP_40_op.inc
new file mode 100644 (file)
index 0000000..a65f2a4
--- /dev/null
@@ -0,0 +1,101 @@
+
+  template <typename T>                                                           
+  std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_fetch_add(  
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       
+    T tmp;                                                                        
+    Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());
+    #pragma omp atomic capture                                                    
+    { tmp = *dest;  *dest += value; }                                             
+    Impl::openmp_maybe_call_post_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());   
+    return tmp;                                                                   
+  }                                                                               
+  template <typename T>                                                           
+  std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_fetch_sub(  
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       
+    T tmp;                                                                        
+    Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());    
+    #pragma omp atomic capture                                                    
+    { tmp = *dest;  *dest -= value; }                                             
+    Impl::openmp_maybe_call_post_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());   
+    return tmp;                                                                   
+  }                                                                               
+  template <typename T>                                                           
+  std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_fetch_and(  
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       
+    T tmp;                                                                        
+    Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());   
+    #pragma omp atomic capture                                                    
+    { tmp = *dest;  *dest &= value; }                                             
+    Impl::openmp_maybe_call_post_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());   
+    return tmp;                                                                   
+  }                                                                               
+  template <typename T>                                                           
+  std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_fetch_or(   
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       
+    T tmp;                                                                        
+    Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());    
+    #pragma omp atomic capture                                                    
+    { tmp = *dest;  *dest |= value; }                                             
+    Impl::openmp_maybe_call_post_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());   
+    return tmp;                                                                   
+  }                                                                               
+  template <typename T>                                                           
+  std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_fetch_xor(  
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       
+    T tmp;                                                                        
+    Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());    
+    #pragma omp atomic capture                                                    
+    { tmp = *dest;  *dest ^= value; }                                             
+    Impl::openmp_maybe_call_post_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());   
+    return tmp;                                                                   
+  }                                                                               
+  template <typename T>                                                           
+  std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_add_fetch(  
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       
+    T tmp;                                                                        
+    Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());    
+    #pragma omp atomic capture                                                    
+    { *dest += value; tmp = *dest; }                                              
+    Impl::openmp_maybe_call_post_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());   
+    return tmp;                                                                   
+  }                                                                               
+  template <typename T>                                                           
+  std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_sub_fetch(  
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       
+    T tmp;                                                                        
+    Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());    
+    #pragma omp atomic capture                                                    
+    { *dest -= value; tmp = *dest; }                                              
+    Impl::openmp_maybe_call_post_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());   
+    return tmp;                                                                   
+  }                                                                               
+  template <typename T>                                                           
+  std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_and_fetch(  
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       
+    T tmp;                                                                        
+    Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());    
+    #pragma omp atomic capture                                                    
+    { *dest &= value; tmp = *dest; }                                              
+    Impl::openmp_maybe_call_post_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());   
+    return tmp;                                                                   
+  }                                                                               
+  template <typename T>                                                           
+  std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_or_fetch(   
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       
+    T tmp;                                                                        
+    Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());    
+    #pragma omp atomic capture                                                    
+    { *dest |= value; tmp = *dest; }                                              
+    Impl::openmp_maybe_call_post_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());   
+    return tmp;                                                                   
+  }                                                                               
+  template <typename T>                                                           
+  std::enable_if_t<Impl::is_openmp_atomic_type_v<T>,T> atomic_xor_fetch(  
+      T* const dest, T value, MEMORY_ORDER, MEMORY_SCOPE) {                       
+    T tmp;                                                                        
+    Impl::openmp_maybe_call_pre_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());    
+    #pragma omp atomic capture                                                    
+    { *dest ^= value; tmp = *dest; }                                              
+    Impl::openmp_maybe_call_post_capture_flush(MEMORY_ORDER(), MEMORY_SCOPE());   
+    return tmp;                                                                   
+  }
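
The two families in this file differ only in whether the capture reads *dest before or after the update: atomic_fetch_* returns the old value, atomic_*_fetch the new one. A small self-contained program illustrating the two capture forms (plain OpenMP, independent of desul):

    #include <cstdio>

    int main() {
      int counter = 0;
      int old_val, new_val;
      // fetch-add form: old_val receives the value *before* the update
    #pragma omp atomic capture
      { old_val = counter; counter += 5; }
      // add-fetch form: new_val receives the value *after* the update
    #pragma omp atomic capture
      { counter += 5; new_val = counter; }
      std::printf("old=%d new=%d final=%d\n", old_val, new_val, counter);  // old=0 new=10 final=10
      return 0;
    }
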
diff --git a/bundled/kokkos-3.7.00/tpls/desul/src/Lock_Array_CUDA.cpp b/bundled/kokkos-3.7.00/tpls/desul/src/Lock_Array_CUDA.cpp
new file mode 100644 (file)
index 0000000..19944b3
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#include <cinttypes>
+#include <desul/atomics/Lock_Array.hpp>
+#include <sstream>
+#include <string>
+
+#ifdef DESUL_HAVE_CUDA_ATOMICS
+#ifdef __CUDACC_RDC__
+namespace desul {
+namespace Impl {
+__device__ __constant__ int32_t* CUDA_SPACE_ATOMIC_LOCKS_DEVICE = nullptr;
+__device__ __constant__ int32_t* CUDA_SPACE_ATOMIC_LOCKS_NODE = nullptr;
+}  // namespace Impl
+}  // namespace desul
+#endif
+
+namespace desul {
+
+namespace {
+
+__global__ void init_lock_arrays_cuda_kernel() {
+  unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
+  if (i < CUDA_SPACE_ATOMIC_MASK + 1) {
+    Impl::CUDA_SPACE_ATOMIC_LOCKS_DEVICE[i] = 0;
+    Impl::CUDA_SPACE_ATOMIC_LOCKS_NODE[i] = 0;
+  }
+}
+
+}  // namespace
+
+namespace Impl {
+
+int32_t* CUDA_SPACE_ATOMIC_LOCKS_DEVICE_h = nullptr;
+int32_t* CUDA_SPACE_ATOMIC_LOCKS_NODE_h = nullptr;
+
+// Putting this into an anonymous namespace so that we don't get multiply
+// defined symbols when linking in more than one copy of the object file
+namespace {
+
+void check_error_and_throw_cuda(cudaError e, const std::string msg) {
+  if (e != cudaSuccess) {
+    std::ostringstream out;
+    out << "Desul::Error: " << msg << " error(" << cudaGetErrorName(e)
+        << "): " << cudaGetErrorString(e);
+    throw std::runtime_error(out.str());
+  }
+}
+
+}  // namespace
+
+// define functions
+template <typename T>
+void init_lock_arrays_cuda() {
+  if (CUDA_SPACE_ATOMIC_LOCKS_DEVICE_h != nullptr) return;
+  auto error_malloc1 = cudaMalloc(&CUDA_SPACE_ATOMIC_LOCKS_DEVICE_h,
+                                  sizeof(int32_t) * (CUDA_SPACE_ATOMIC_MASK + 1));
+  check_error_and_throw_cuda(error_malloc1,
+                             "init_lock_arrays_cuda: cudaMalloc device locks");
+
+  auto error_malloc2 = cudaMallocHost(&CUDA_SPACE_ATOMIC_LOCKS_NODE_h,
+                                      sizeof(int32_t) * (CUDA_SPACE_ATOMIC_MASK + 1));
+  check_error_and_throw_cuda(error_malloc2,
+                             "init_lock_arrays_cuda: cudaMalloc host locks");
+
+  auto error_sync1 = cudaDeviceSynchronize();
+  copy_cuda_lock_arrays_to_device();
+  check_error_and_throw_cuda(error_sync1, "init_lock_arrays_cuda: post mallocs");
+  init_lock_arrays_cuda_kernel<<<(CUDA_SPACE_ATOMIC_MASK + 1 + 255) / 256, 256>>>();
+  auto error_sync2 = cudaDeviceSynchronize();
+  check_error_and_throw_cuda(error_sync2, "init_lock_arrays_cuda: post init kernel");
+}
+
+template <typename T>
+void finalize_lock_arrays_cuda() {
+  if (CUDA_SPACE_ATOMIC_LOCKS_DEVICE_h == nullptr) return;
+  cudaFree(CUDA_SPACE_ATOMIC_LOCKS_DEVICE_h);
+  cudaFreeHost(CUDA_SPACE_ATOMIC_LOCKS_NODE_h);
+  CUDA_SPACE_ATOMIC_LOCKS_DEVICE_h = nullptr;
+  CUDA_SPACE_ATOMIC_LOCKS_NODE_h = nullptr;
+#ifdef __CUDACC_RDC__
+  copy_cuda_lock_arrays_to_device();
+#endif
+}
+
+// Instantiate functions
+template void init_lock_arrays_cuda<int>();
+template void finalize_lock_arrays_cuda<int>();
+
+}  // namespace Impl
+
+}  // namespace desul
+#endif
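
For context, the lifecycle implied by this file is: allocate the device and pinned-host lock arrays once, publish the device pointers (copy_cuda_lock_arrays_to_device), zero them with the init kernel, and release everything at shutdown. A hedged host-side sketch; application code normally never calls these directly, since the library's initialization path is expected to do so:

    #include <desul/atomics/Lock_Array.hpp>

    void setup_and_teardown() {
      // Idempotent: returns immediately if the arrays were already allocated.
      desul::Impl::init_lock_arrays_cuda<int>();
      // ... launch kernels that fall back to lock-based atomics ...
      desul::Impl::finalize_lock_arrays_cuda<int>();  // frees device + host arrays
    }
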
diff --git a/bundled/kokkos-3.7.00/tpls/desul/src/Lock_Array_HIP.cpp b/bundled/kokkos-3.7.00/tpls/desul/src/Lock_Array_HIP.cpp
new file mode 100644 (file)
index 0000000..5ccc6f7
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+Copyright (c) 2019, Lawrence Livermore National Security, LLC
+and DESUL project contributors. See the COPYRIGHT file for details.
+Source: https://github.com/desul/desul
+
+SPDX-License-Identifier: (BSD-3-Clause)
+*/
+
+#include <cinttypes>
+#include <desul/atomics/Lock_Array.hpp>
+#include <sstream>
+#include <string>
+
+#ifdef DESUL_HAVE_HIP_ATOMICS
+#ifdef DESUL_HIP_RDC
+namespace desul {
+namespace Impl {
+__device__ __constant__ int32_t* HIP_SPACE_ATOMIC_LOCKS_DEVICE = nullptr;
+__device__ __constant__ int32_t* HIP_SPACE_ATOMIC_LOCKS_NODE = nullptr;
+}  // namespace Impl
+}  // namespace desul
+#endif
+
+namespace desul {
+
+namespace {
+
+__global__ void init_lock_arrays_hip_kernel() {
+  unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
+  if (i < HIP_SPACE_ATOMIC_MASK + 1) {
+    Impl::HIP_SPACE_ATOMIC_LOCKS_DEVICE[i] = 0;
+    Impl::HIP_SPACE_ATOMIC_LOCKS_NODE[i] = 0;
+  }
+}
+
+}  // namespace
+
+namespace Impl {
+
+int32_t* HIP_SPACE_ATOMIC_LOCKS_DEVICE_h = nullptr;
+int32_t* HIP_SPACE_ATOMIC_LOCKS_NODE_h = nullptr;
+
+// Putting this into an anonymous namespace so that we don't get multiply
+// defined symbols when linking in more than one copy of the object file
+namespace {
+
+void check_error_and_throw_hip(hipError_t e, const std::string msg) {
+  if (e != hipSuccess) {
+    std::ostringstream out;
+    out << "Desul::Error: " << msg << " error(" << hipGetErrorName(e)
+        << "): " << hipGetErrorString(e);
+    throw std::runtime_error(out.str());
+  }
+}
+
+}  // namespace
+
+template <typename T>
+void init_lock_arrays_hip() {
+  if (HIP_SPACE_ATOMIC_LOCKS_DEVICE_h != nullptr) return;
+
+  auto error_malloc1 = hipMalloc(&HIP_SPACE_ATOMIC_LOCKS_DEVICE_h,
+                                 sizeof(int32_t) * (HIP_SPACE_ATOMIC_MASK + 1));
+  check_error_and_throw_hip(error_malloc1,
+                            "init_lock_arrays_hip: hipMalloc device locks");
+
+  auto error_malloc2 = hipHostMalloc(&HIP_SPACE_ATOMIC_LOCKS_NODE_h,
+                                     sizeof(int32_t) * (HIP_SPACE_ATOMIC_MASK + 1));
+  check_error_and_throw_hip(error_malloc2,
+                            "init_lock_arrays_hip: hipMallocHost host locks");
+
+  auto error_sync1 = hipDeviceSynchronize();
+  DESUL_IMPL_COPY_HIP_LOCK_ARRAYS_TO_DEVICE();
+  check_error_and_throw_hip(error_sync1, "init_lock_arrays_hip: post malloc");
+
+  init_lock_arrays_hip_kernel<<<(HIP_SPACE_ATOMIC_MASK + 1 + 255) / 256, 256>>>();
+
+  auto error_sync2 = hipDeviceSynchronize();
+  check_error_and_throw_hip(error_sync2, "init_lock_arrays_hip: post init");
+}
+
+template <typename T>
+void finalize_lock_arrays_hip() {
+  if (HIP_SPACE_ATOMIC_LOCKS_DEVICE_h == nullptr) return;
+  auto error_free1 = hipFree(HIP_SPACE_ATOMIC_LOCKS_DEVICE_h);
+  check_error_and_throw_hip(error_free1, "finalize_lock_arrays_hip: free device locks");
+  auto error_free2 = hipHostFree(HIP_SPACE_ATOMIC_LOCKS_NODE_h);
+  check_error_and_throw_hip(error_free2, "finalize_lock_arrays_hip: free host locks");
+  HIP_SPACE_ATOMIC_LOCKS_DEVICE_h = nullptr;
+  HIP_SPACE_ATOMIC_LOCKS_NODE_h = nullptr;
+#ifdef DESUL_HIP_RDC
+  DESUL_IMPL_COPY_HIP_LOCK_ARRAYS_TO_DEVICE();
+#endif
+}
+
+template void init_lock_arrays_hip<int>();
+template void finalize_lock_arrays_hip<int>();
+
+}  // namespace Impl
+
+}  // namespace desul
+#endif
index e84e888aed4d31fd086f32755baf8d2f19268930..ef62d1ae82f057f23cc26a3445131aaf69e3ca2c 100644 (file)
@@ -30,6 +30,18 @@ OPTION(DEAL_II_FORCE_BUNDLED_BOOST
 
 SET(BOOST_FOLDER "${CMAKE_SOURCE_DIR}/bundled/boost-1.70.0")
 
+#
+# Kokkos
+#
+
+SET(FEATURE_KOKKOS_HAVE_BUNDLED TRUE)
+
+OPTION(DEAL_II_FORCE_BUNDLED_KOKKOS
+  "Always use the bundled Kokkos library instead of an external one."
+  OFF)
+
+SET(KOKKOS_FOLDER "${CMAKE_SOURCE_DIR}/bundled/kokkos-3.7.00")
+
 #
 # Taskflow
 #
index ff05196e960a655b4e11acdb2148e248e2ab83a8..61619bcadaa81030db32d38044486cedbfc60ab7 100644 (file)
@@ -177,6 +177,14 @@ MACRO(FEATURE_TRILINOS_FIND_EXTERNAL var)
     ENDIF()
 
     IF(DEAL_II_TRILINOS_WITH_KOKKOS)
+      IF(DEAL_II_FORCE_BUNDLED_KOKKOS)
+        SET(TRILINOS_ADDITIONAL_ERROR_STRING
+          ${TRILINOS_ADDITIONAL_ERROR_STRING}
+          "The Trilinos installation (found at \"${TRILINOS_DIR}\")"
+          "includes Kokkos, but DEAL_II_FORCE_BUNDLED_KOKKOS=ON!\n")
+        SET(${var} FALSE)
+      ENDIF()
+
       IF(Kokkos_ENABLE_CUDA)
         # We need to disable SIMD vectorization for CUDA device code.
         # Otherwise, nvcc compilers from version 9 on will emit an error message like:
index 0cd0d48b43d0bec2b804855a5692f8056544203c..2d08d36a56350b443a56bfa6538ef069d0f1dab6 100644 (file)
 # Configuration for Kokkos support in deal.II:
 #
 
+SET(DEAL_II_WITH_KOKKOS ON # Always true. We need it :-]
+  CACHE BOOL "Build deal.II with support for Kokkos." FORCE
+  )
+
+MACRO(FEATURE_KOKKOS_CONFIGURE_BUNDLED)
+  SET(KOKKOS_BUNDLED_INCLUDE_DIRS
+    ${KOKKOS_FOLDER}/algorithms/src
+    ${KOKKOS_FOLDER}/containers/src
+    ${KOKKOS_FOLDER}/core/src
+    ${KOKKOS_FOLDER}/simd/src
+    ${KOKKOS_FOLDER}/tpls/desul/include
+    )
+ENDMACRO()
+
 CONFIGURE_FEATURE(KOKKOS)
+
+#
+# DEAL_II_WITH_KOKKOS is always required.
+#
+IF(NOT DEAL_II_WITH_KOKKOS)
+  IF(DEAL_II_FEATURE_AUTODETECTION)
+    FEATURE_ERROR_MESSAGE("KOKKOS")
+  ELSE()
+    MESSAGE(FATAL_ERROR "\n"
+      "Unmet configuration requirements: "
+      "DEAL_II_WITH_KOKKOS required, but set to OFF!.\n\n"
+      )
+  ENDIF()
+ENDIF()
index 44c8715e892116854b6ba4368c24caa2ba8f5776..02c94820e7e6de7f1e2f050a1fab24f06e6a3e3d 100644 (file)
@@ -1,6 +1,6 @@
 ## ---------------------------------------------------------------------
 ##
-## Copyright (C) 2021 by the deal.II authors
+## Copyright (C) 2021 - 2022 by the deal.II authors
 ##
 ## This file is part of the deal.II library.
 ##
@@ -32,9 +32,12 @@ IF(DEAL_II_TRILINOS_WITH_KOKKOS)
   # Let deal.II know that we have found Kokkos
   SET(KOKKOS_FOUND ON)
 ELSE()
-  FIND_PACKAGE(Kokkos
+  # temporarily disable ${CMAKE_SOURCE_DIR}/cmake/modules for module lookup
+  LIST(REMOVE_ITEM CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules/)
+  FIND_PACKAGE(Kokkos 3.7.0
     HINTS ${KOKKOS_DIR} ${Kokkos_DIR} $ENV{Kokkos_DIR}
     )
+  LIST(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules/)
 
   IF(Kokkos_FOUND)
     # We are only interested in Kokkos if it is not part of Trilinos
index a07b803174be96deaf3779b7df1bba585ece2c16..0927bbe296c2f4b909756ddc44b4ed3ec9abc9e2 100644 (file)
@@ -82,7 +82,7 @@
         with the way PETSc is configured.
       </li>
       <li>
-        <acronym>deal.II</acronym> includes copies of boost, muParser, TBB, and
+        <acronym>deal.II</acronym> includes copies of boost, Kokkos, muParser, TBB, and
         UMFPACK. These bundled packages should be disabled
         via <tt>-DDEAL_II_ALLOW_BUNDLED=OFF</tt> and these dependencies should
         instead be resolved by ensuring that whenever <acronym>deal.II</acronym>
diff --git a/doc/news/changes/major/20221026Arndt b/doc/news/changes/major/20221026Arndt
new file mode 100644 (file)
index 0000000..2ede06d
--- /dev/null
@@ -0,0 +1,3 @@
+New: Kokkos is now a required dependency.
+<br>
+(Daniel Arndt, 2022/10/26)
index 9e7d07333a8af26412b1a3360bf0c1d716412b43..90f0c8c065d10afcf88344ce37864f416ddd2e1f 100644 (file)
                 <p>
                     <a href="https://github.com/kokkos/kokkos" target="_top">Kokkos</a> implements a programming model in C++ for writing performance portable applications targeting all major HPC platforms.
                     To use a self compiled version, pass <code>-DKOKKOS_DIR=/path/to/kokkos</code> to the deal.II CMake call.
+                    The compiler must be able to compile code for all enabled backends. If CUDA is enabled in Kokkos and nvcc is to be used as the device compiler,
+                    it is recommended to use the nvcc_wrapper script that comes with Kokkos as the C++ compiler; clang++ or nvc++ (for Kokkos >= 4.0) can be used directly as the C++ compiler instead.
+                    In addition, Kokkos must be built with support for device lambdas, e.g., Kokkos_ENABLE_CUDA_LAMBDA=ON when configuring Kokkos with CUDA; for Kokkos >= 4.0 this is the default.
                 </p>
             </dd>
 
             <a href="http://faculty.cse.tamu.edu/davis/suitesparse.html">
           UMFPACK</a>,
             <a href="http://threadingbuildingblocks.org/" target="_top">Threading Building Blocks</a>,
-            <a href="http://www.boost.org/" target="_top">BOOST</a> and
+            <a href="http://www.boost.org/" target="_top">BOOST</a>,
+            <a href="http://github.com/kokkos/kokkos" target="_top">Kokkos</a>, and
             <a href="http://muparser.beltoforion.de/" target="_top">muparser</a>, courtesy of their authors. These are also covered by their own licenses; please refer to their webpages for more information.
         </li>
     </ul>
index 799e3f5f78dba923b3558f061d746aa8308c9d79..ee0cf813f9a48f969adddca84c6e6532ff8048cb 100644 (file)
@@ -511,7 +511,7 @@ DEAL_II_WITH_ZLIB
           </p>
         <li>
           <p>
-            <code>DEAL_II_FORCE_BUNDLED_(BOOST|MUPARSER|THREADS|UMFPACK)</code>:
+            <code>DEAL_II_FORCE_BUNDLED_(BOOST|KOKKOS|MUPARSER|THREADS|UMFPACK)</code>:
           </p>
           <p>
             Forces the use of the bundled library regardless whether
index 888dffcd7d7b16721d1455733438f93709a2de6c..2bb11c61ff7f2fdaa61195fd65ceceb400959902 100644 (file)
@@ -481,6 +481,7 @@ _Pragma("GCC diagnostic ignored \"-Wdeprecated-copy\"")                 \
 _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")         \
 _Pragma("GCC diagnostic ignored \"-Wdeprecated-volatile\"")             \
 _Pragma("GCC diagnostic ignored \"-Wexpansion-to-defined\"")            \
+_Pragma("GCC diagnostic ignored \"-Wfloat-conversion\"")                \
 _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"")              \
 _Pragma("GCC diagnostic ignored \"-Wignored-qualifiers\"")              \
 _Pragma("GCC diagnostic ignored \"-Wimplicit-fallthrough\"")            \
index f09ffb83d45476b64c7a37a79c53e0528a0598ee..2e6589caff401c359b6eeb25512f86627dd9c8a3 100644 (file)
@@ -196,6 +196,9 @@ IF (DEAL_II_WITH_SYMENGINE)
   make_quicktest("symengine" ${_mybuild} "")
 ENDIF()
 
+# Test Kokkos
+make_quicktest("kokkos" ${_mybuild} "")
+
 # A custom test target:
 ADD_CUSTOM_TARGET(test
   COMMAND ${CMAKE_COMMAND} -D ALL_TESTS="${ALL_TESTS}" -DCMAKE_BUILD_TYPE=${_mybuild} -P ${CMAKE_CURRENT_SOURCE_DIR}/run.cmake
diff --git a/tests/quick_tests/kokkos.cc b/tests/quick_tests/kokkos.cc
new file mode 100644 (file)
index 0000000..8bce488
--- /dev/null
@@ -0,0 +1,40 @@
+
+#include <deal.II/base/config.h>
+DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
+#include <Kokkos_Core.hpp>
+DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
+
+struct FillFunctor
+{
+  KOKKOS_FUNCTION void
+  operator()(const int i) const
+  {
+    m_values(i) = i;
+  }
+  Kokkos::View<int *> m_values;
+};
+
+struct SumFunctor
+{
+  KOKKOS_FUNCTION void
+  operator()(const int i, int &sum) const
+  {
+    sum += m_values(i);
+  }
+  Kokkos::View<int *> m_values;
+};
+
+int
+main(int argc, char *argv[])
+{
+  const long n = 100;
+  int        sum;
+  Kokkos::initialize(argc, argv);
+  {
+    Kokkos::View<int *> values("values", n);
+    Kokkos::parallel_for(n, FillFunctor{values});
+    Kokkos::parallel_reduce(n, SumFunctor{values}, sum);
+  }
+  Kokkos::finalize();
+  return (sum != (n * (n - 1)) / 2);
+}
